From c1e00ef2a6442d1d047950247c757821560db329 Mon Sep 17 00:00:00 2001 From: Jason Evans Date: Tue, 10 May 2016 22:21:10 -0700 Subject: [PATCH] Resolve bootstrapping issues when embedded in FreeBSD libc. b2c0d6322d2307458ae2b28545f8a5c9903d7ef5 (Add witness, a simple online locking validator.) caused a broad propagation of tsd throughout the internal API, but tsd_fetch() was designed to fail prior to tsd bootstrapping. Fix this by splitting tsd_t into non-nullable tsd_t and nullable tsdn_t, and modifying all internal APIs that do not critically rely on tsd to take nullable pointers. Furthermore, add the tsd_booted_get() function so that tsdn_fetch() can probe whether tsd bootstrapping is complete and return NULL if not. All dangerous conversions of nullable pointers are tsdn_tsd() calls that assert-fail on invalid conversion. --- include/jemalloc/internal/arena.h | 201 +++--- include/jemalloc/internal/base.h | 10 +- include/jemalloc/internal/chunk.h | 22 +- include/jemalloc/internal/chunk_dss.h | 14 +- include/jemalloc/internal/ckh.h | 8 +- include/jemalloc/internal/ctl.h | 10 +- include/jemalloc/internal/huge.h | 20 +- .../jemalloc/internal/jemalloc_internal.h.in | 103 +-- include/jemalloc/internal/mutex.h | 32 +- include/jemalloc/internal/private_symbols.txt | 7 +- include/jemalloc/internal/prof.h | 74 +-- include/jemalloc/internal/tcache.h | 33 +- include/jemalloc/internal/tsd.h | 76 +++ include/jemalloc/internal/valgrind.h | 12 +- include/jemalloc/internal/witness.h | 10 +- src/arena.c | 585 +++++++++--------- src/base.c | 45 +- src/chunk.c | 146 ++--- src/chunk_dss.c | 42 +- src/ckh.c | 42 +- src/ctl.c | 229 +++---- src/huge.c | 152 ++--- src/jemalloc.c | 518 ++++++++-------- src/mutex.c | 12 +- src/prof.c | 475 +++++++------- src/quarantine.c | 44 +- src/tcache.c | 123 ++-- src/witness.c | 36 +- src/zone.c | 8 +- test/unit/arena_reset.c | 8 +- test/unit/ckh.c | 46 +- test/unit/junk.c | 4 +- test/unit/prof_reset.c | 2 +- test/unit/witness.c | 116 ++-- 34 files changed, 1709 insertions(+), 1556 deletions(-) diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h index debb43f3..b1de2b61 100644 --- a/include/jemalloc/internal/arena.h +++ b/include/jemalloc/internal/arena.h @@ -518,28 +518,28 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache); void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool cache); -extent_node_t *arena_node_alloc(tsd_t *tsd, arena_t *arena); -void arena_node_dalloc(tsd_t *tsd, arena_t *arena, extent_node_t *node); -void *arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize, +extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena); +void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node); +void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero); -void arena_chunk_dalloc_huge(tsd_t *tsd, arena_t *arena, void *chunk, +void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize); -void arena_chunk_ralloc_huge_similar(tsd_t *tsd, arena_t *arena, void *chunk, - size_t oldsize, size_t usize); -void arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk, - size_t oldsize, size_t usize); -bool arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk, - size_t oldsize, size_t usize, bool *zero); -ssize_t arena_lg_dirty_mult_get(tsd_t *tsd, arena_t *arena); -bool arena_lg_dirty_mult_set(tsd_t *tsd, arena_t *arena, +void 
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize); +void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize); +bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, + void *chunk, size_t oldsize, size_t usize, bool *zero); +ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena); +bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult); -ssize_t arena_decay_time_get(tsd_t *tsd, arena_t *arena); -bool arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time); -void arena_purge(tsd_t *tsd, arena_t *arena, bool all); -void arena_maybe_purge(tsd_t *tsd, arena_t *arena); +ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena); +bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time); +void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all); +void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena); void arena_reset(tsd_t *tsd, arena_t *arena); -void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, - szind_t binind, uint64_t prof_accumbytes); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero); #ifdef JEMALLOC_JET @@ -552,17 +552,18 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small; void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); #endif void arena_quarantine_junk_small(void *ptr, size_t usize); -void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero); -void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, +void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind, bool zero); -void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache); -void arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size); -void arena_dalloc_bin_junked_locked(tsd_t *tsd, arena_t *arena, +void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm); -void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind); #ifdef JEMALLOC_JET typedef void (arena_dalloc_junk_large_t)(void *, size_t); @@ -570,28 +571,28 @@ extern arena_dalloc_junk_large_t *arena_dalloc_junk_large; #else void arena_dalloc_junk_large(void *ptr, size_t usize); #endif -void arena_dalloc_large_junked_locked(tsd_t *tsd, arena_t *arena, +void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr); -void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr); #ifdef JEMALLOC_JET typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t); extern arena_ralloc_junk_large_t *arena_ralloc_junk_large; #endif 
-bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, - size_t extra, bool zero); +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t size, size_t extra, bool zero); void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache); -dss_prec_t arena_dss_prec_get(tsd_t *tsd, arena_t *arena); -bool arena_dss_prec_set(tsd_t *tsd, arena_t *arena, dss_prec_t dss_prec); +dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena); +bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec); ssize_t arena_lg_dirty_mult_default_get(void); bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult); ssize_t arena_decay_time_default_get(void); bool arena_decay_time_default_set(ssize_t decay_time); -void arena_basic_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, - const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, - size_t *nactive, size_t *ndirty); -void arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, + ssize_t *decay_time, size_t *nactive, size_t *ndirty); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, @@ -599,14 +600,14 @@ void arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, unsigned arena_nthreads_get(arena_t *arena, bool internal); void arena_nthreads_inc(arena_t *arena, bool internal); void arena_nthreads_dec(arena_t *arena, bool internal); -arena_t *arena_new(tsd_t *tsd, unsigned ind); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind); bool arena_boot(void); -void arena_prefork0(tsd_t *tsd, arena_t *arena); -void arena_prefork1(tsd_t *tsd, arena_t *arena); -void arena_prefork2(tsd_t *tsd, arena_t *arena); -void arena_prefork3(tsd_t *tsd, arena_t *arena); -void arena_postfork_parent(tsd_t *tsd, arena_t *arena); -void arena_postfork_child(tsd_t *tsd, arena_t *arena); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ @@ -663,24 +664,24 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size); size_t arena_metadata_allocated_get(arena_t *arena); bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(tsd_t *tsd, arena_t *arena, uint64_t accumbytes); +bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes); szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin); size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr); -prof_tctx_t *arena_prof_tctx_get(tsd_t *tsd, const void *ptr); -void arena_prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize, +prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, 
prof_tctx_t *tctx); -void arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize, +void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx); -void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks); -void arena_decay_tick(tsd_t *tsd, arena_t *arena); -void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, +void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks); +void arena_decay_tick(tsdn_t *tsdn, arena_t *arena); +void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path); arena_t *arena_aalloc(const void *ptr); -size_t arena_salloc(tsd_t *tsd, const void *ptr, bool demote); -void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, +size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote); +void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path); +void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, bool slow_path); #endif @@ -1056,7 +1057,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) } JEMALLOC_INLINE bool -arena_prof_accum(tsd_t *tsd, arena_t *arena, uint64_t accumbytes) +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { cassert(config_prof); @@ -1067,9 +1068,9 @@ arena_prof_accum(tsd_t *tsd, arena_t *arena, uint64_t accumbytes) { bool ret; - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (ret); } } @@ -1205,7 +1206,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) } JEMALLOC_INLINE prof_tctx_t * -arena_prof_tctx_get(tsd_t *tsd, const void *ptr) +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr) { prof_tctx_t *ret; arena_chunk_t *chunk; @@ -1226,13 +1227,13 @@ arena_prof_tctx_get(tsd_t *tsd, const void *ptr) ret = atomic_read_p(&elm->prof_tctx_pun); } } else - ret = huge_prof_tctx_get(tsd, ptr); + ret = huge_prof_tctx_get(tsdn, ptr); return (ret); } JEMALLOC_INLINE void -arena_prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize, +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { arena_chunk_t *chunk; @@ -1264,11 +1265,11 @@ arena_prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize, assert(arena_mapbits_large_get(chunk, pageind) == 0); } } else - huge_prof_tctx_set(tsd, ptr, tctx); + huge_prof_tctx_set(tsdn, ptr, tctx); } JEMALLOC_INLINE void -arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize, +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { @@ -1292,52 +1293,55 @@ arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize, atomic_write_p(&elm->prof_tctx_pun, (prof_tctx_t *)(uintptr_t)1U); } else - huge_prof_tctx_reset(tsd, ptr); + huge_prof_tctx_reset(tsdn, ptr); } } JEMALLOC_ALWAYS_INLINE void -arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks) +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { + tsd_t *tsd; ticker_t *decay_ticker; - if (unlikely(tsd == NULL)) + if (unlikely(tsdn_null(tsdn))) return; + tsd = tsdn_tsd(tsdn); decay_ticker = decay_ticker_get(tsd, arena->ind); if (unlikely(decay_ticker == NULL)) return; if (unlikely(ticker_ticks(decay_ticker, nticks))) - 
arena_purge(tsd, arena, false); + arena_purge(tsdn, arena, false); } JEMALLOC_ALWAYS_INLINE void -arena_decay_tick(tsd_t *tsd, arena_t *arena) +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { - arena_decay_ticks(tsd, arena, 1); + arena_decay_ticks(tsdn, arena, 1); } JEMALLOC_ALWAYS_INLINE void * -arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero, +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); assert(size != 0); if (likely(tcache != NULL)) { if (likely(size <= SMALL_MAXCLASS)) { - return (tcache_alloc_small(tsd, arena, tcache, size, - ind, zero, slow_path)); + return (tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); } if (likely(size <= tcache_maxclass)) { - return (tcache_alloc_large(tsd, arena, tcache, size, - ind, zero, slow_path)); + return (tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path)); } /* (size > tcache_maxclass) case falls through. */ assert(size > tcache_maxclass); } - return (arena_malloc_hard(tsd, arena, size, ind, zero)); + return (arena_malloc_hard(tsdn, arena, size, ind, zero)); } JEMALLOC_ALWAYS_INLINE arena_t * @@ -1354,7 +1358,7 @@ arena_aalloc(const void *ptr) /* Return the size of the allocation pointed to by ptr. */ JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(tsd_t *tsd, const void *ptr, bool demote) +arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote) { size_t ret; arena_chunk_t *chunk; @@ -1397,17 +1401,18 @@ arena_salloc(tsd_t *tsd, const void *ptr, bool demote) ret = index2size(binind); } } else - ret = huge_salloc(tsd, ptr); + ret = huge_salloc(tsdn, ptr); return (ret); } JEMALLOC_ALWAYS_INLINE void -arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path) { arena_chunk_t *chunk; size_t pageind, mapbits; + assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); @@ -1420,11 +1425,12 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) if (likely(tcache != NULL)) { szind_t binind = arena_ptr_small_binind_get(ptr, mapbits); - tcache_dalloc_small(tsd, tcache, ptr, binind, - slow_path); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); } else { - arena_dalloc_small(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr, pageind); + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); } } else { size_t size = arena_mapbits_large_size_get(chunk, @@ -1435,23 +1441,26 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) if (likely(tcache != NULL) && size - large_pad <= tcache_maxclass) { - tcache_dalloc_large(tsd, tcache, ptr, size - - large_pad, slow_path); + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size - large_pad, slow_path); } else { - arena_dalloc_large(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr); + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); } } } else - huge_dalloc(tsd, ptr); + huge_dalloc(tsdn, ptr); } JEMALLOC_ALWAYS_INLINE void -arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, +arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, bool slow_path) { arena_chunk_t *chunk; + assert(!tsdn_null(tsdn) || tcache == NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); if (likely(chunk != ptr)) { if (config_prof && opt_prof) { 
@@ -1468,34 +1477,36 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, pageind) - large_pad; } } - assert(s2u(size) == s2u(arena_salloc(tsd, ptr, false))); + assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false))); if (likely(size <= SMALL_MAXCLASS)) { /* Small allocation. */ if (likely(tcache != NULL)) { szind_t binind = size2index(size); - tcache_dalloc_small(tsd, tcache, ptr, binind, - slow_path); + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, + binind, slow_path); } else { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - arena_dalloc_small(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr, pageind); + arena_dalloc_small(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr, pageind); } } else { assert(config_cache_oblivious || ((uintptr_t)ptr & PAGE_MASK) == 0); if (likely(tcache != NULL) && size <= tcache_maxclass) { - tcache_dalloc_large(tsd, tcache, ptr, size, - slow_path); + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + size, slow_path); } else { - arena_dalloc_large(tsd, extent_node_arena_get( - &chunk->node), chunk, ptr); + arena_dalloc_large(tsdn, + extent_node_arena_get(&chunk->node), chunk, + ptr); } } } else - huge_dalloc(tsd, ptr); + huge_dalloc(tsdn, ptr); } # endif /* JEMALLOC_ARENA_INLINE_B */ #endif diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h index 075a2a20..d6b81e16 100644 --- a/include/jemalloc/internal/base.h +++ b/include/jemalloc/internal/base.h @@ -9,13 +9,13 @@ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -void *base_alloc(tsd_t *tsd, size_t size); -void base_stats_get(tsd_t *tsd, size_t *allocated, size_t *resident, +void *base_alloc(tsdn_t *tsdn, size_t size); +void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, size_t *mapped); bool base_boot(void); -void base_prefork(tsd_t *tsd); -void base_postfork_parent(tsd_t *tsd); -void base_postfork_child(tsd_t *tsd); +void base_prefork(tsdn_t *tsdn); +void base_postfork_parent(tsdn_t *tsdn); +void base_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h index 6c3ad9bf..c9fd4ecb 100644 --- a/include/jemalloc/internal/chunk.h +++ b/include/jemalloc/internal/chunk.h @@ -48,32 +48,32 @@ extern size_t chunk_npages; extern const chunk_hooks_t chunk_hooks_default; -chunk_hooks_t chunk_hooks_get(tsd_t *tsd, arena_t *arena); -chunk_hooks_t chunk_hooks_set(tsd_t *tsd, arena_t *arena, +chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); +chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks); -bool chunk_register(tsd_t *tsd, const void *chunk, +bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node); void chunk_deregister(const void *chunk, const extent_node_t *node); void *chunk_alloc_base(size_t size); -void *chunk_alloc_cache(tsd_t *tsd, arena_t *arena, +void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node); -void *chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, +void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); -void chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, +void chunk_dalloc_cache(tsdn_t *tsdn, 
arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed); -void chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, +void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed, bool committed); -bool chunk_purge_wrapper(tsd_t *tsd, arena_t *arena, +bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length); bool chunk_boot(void); -void chunk_prefork(tsd_t *tsd); -void chunk_postfork_parent(tsd_t *tsd); -void chunk_postfork_child(tsd_t *tsd); +void chunk_prefork(tsdn_t *tsdn); +void chunk_postfork_parent(tsdn_t *tsdn); +void chunk_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/chunk_dss.h index 7f3a09c7..724fa579 100644 --- a/include/jemalloc/internal/chunk_dss.h +++ b/include/jemalloc/internal/chunk_dss.h @@ -21,15 +21,15 @@ extern const char *dss_prec_names[]; /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -dss_prec_t chunk_dss_prec_get(tsd_t *tsd); -bool chunk_dss_prec_set(tsd_t *tsd, dss_prec_t dss_prec); -void *chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, +dss_prec_t chunk_dss_prec_get(tsdn_t *tsdn); +bool chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec); +void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); -bool chunk_in_dss(tsd_t *tsd, void *chunk); +bool chunk_in_dss(tsdn_t *tsdn, void *chunk); bool chunk_dss_boot(void); -void chunk_dss_prefork(tsd_t *tsd); -void chunk_dss_postfork_parent(tsd_t *tsd); -void chunk_dss_postfork_child(tsd_t *tsd); +void chunk_dss_prefork(tsdn_t *tsdn); +void chunk_dss_postfork_parent(tsdn_t *tsdn); +void chunk_dss_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ diff --git a/include/jemalloc/internal/ckh.h b/include/jemalloc/internal/ckh.h index f75ad90b..46e151cd 100644 --- a/include/jemalloc/internal/ckh.h +++ b/include/jemalloc/internal/ckh.h @@ -64,13 +64,13 @@ struct ckh_s { /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, +bool ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); -void ckh_delete(tsd_t *tsd, ckh_t *ckh); +void ckh_delete(tsdn_t *tsdn, ckh_t *ckh); size_t ckh_count(ckh_t *ckh); bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, +bool ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); void ckh_string_hash(const void *key, size_t r_hash[2]); diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h index c84c0de9..af0f6d7c 100644 --- a/include/jemalloc/internal/ctl.h +++ b/include/jemalloc/internal/ctl.h @@ -27,7 +27,7 @@ struct ctl_named_node_s { struct ctl_indexed_node_s { struct ctl_node_s node; - const 
ctl_named_node_t *(*index)(tsd_t *, const size_t *, size_t, + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, size_t); }; @@ -72,15 +72,15 @@ struct ctl_stats_s { int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, +int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); -void ctl_prefork(tsd_t *tsd); -void ctl_postfork_parent(tsd_t *tsd); -void ctl_postfork_child(tsd_t *tsd); +void ctl_prefork(tsdn_t *tsdn); +void ctl_postfork_parent(tsdn_t *tsdn); +void ctl_postfork_child(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h index 9de2055d..b5fa9e63 100644 --- a/include/jemalloc/internal/huge.h +++ b/include/jemalloc/internal/huge.h @@ -9,23 +9,23 @@ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS -void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero); -void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, - bool zero); -bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, +void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero); +bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero); void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache); #ifdef JEMALLOC_JET -typedef void (huge_dalloc_junk_t)(tsd_t *, void *, size_t); +typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t); extern huge_dalloc_junk_t *huge_dalloc_junk; #endif -void huge_dalloc(tsd_t *tsd, void *ptr); +void huge_dalloc(tsdn_t *tsdn, void *ptr); arena_t *huge_aalloc(const void *ptr); -size_t huge_salloc(tsd_t *tsd, const void *ptr); -prof_tctx_t *huge_prof_tctx_get(tsd_t *tsd, const void *ptr); -void huge_prof_tctx_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx); -void huge_prof_tctx_reset(tsd_t *tsd, const void *ptr); +size_t huge_salloc(tsdn_t *tsdn, const void *ptr); +prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx); +void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in index 3ce36659..69d94ec5 100644 --- a/include/jemalloc/internal/jemalloc_internal.h.in +++ b/include/jemalloc/internal/jemalloc_internal.h.in @@ -473,7 +473,7 @@ void *bootstrap_malloc(size_t size); void *bootstrap_calloc(size_t num, size_t size); void bootstrap_free(void *ptr); unsigned narenas_total_get(void); -arena_t *arena_init(tsd_t *tsd, unsigned ind); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind); arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); arena_t *arena_choose_hard(tsd_t *tsd, bool internal); void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); @@ -555,10 +555,10 @@ size_t s2u(size_t size); 
size_t sa2u(size_t size, size_t alignment); arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal); arena_t *arena_choose(tsd_t *tsd, arena_t *arena); -arena_t *arena_ichoose(tsd_t *tsd, arena_t *arena); +arena_t *arena_ichoose(tsdn_t *tsdn, arena_t *arena); arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing); -arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing); +arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing); ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind); #endif @@ -815,13 +815,13 @@ arena_choose(tsd_t *tsd, arena_t *arena) } JEMALLOC_INLINE arena_t * -arena_ichoose(tsd_t *tsd, arena_t *arena) +arena_ichoose(tsdn_t *tsdn, arena_t *arena) { - assert(tsd != NULL || arena != NULL); + assert(!tsdn_null(tsdn) || arena != NULL); - if (tsd != NULL) - return (arena_choose_impl(tsd, NULL, true)); + if (!tsdn_null(tsdn)) + return (arena_choose_impl(tsdn_tsd(tsdn), NULL, true)); return (arena); } @@ -851,7 +851,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) } JEMALLOC_INLINE arena_t * -arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing) +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { arena_t *ret; @@ -861,7 +861,7 @@ arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing) if (unlikely(ret == NULL)) { ret = atomic_read_p((void *)&arenas[ind]); if (init_if_missing && unlikely(ret == NULL)) - ret = arena_init(tsd, ind); + ret = arena_init(tsdn, ind); } return (ret); } @@ -895,24 +895,24 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) #ifndef JEMALLOC_ENABLE_INLINE arena_t *iaalloc(const void *ptr); -size_t isalloc(tsd_t *tsd, const void *ptr, bool demote); -void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, +size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote); +void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path); void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path); -void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena); -void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero); -size_t ivsalloc(tsd_t *tsd, const void *ptr, bool demote); +size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote); size_t u2rz(size_t usize); -size_t p2rz(tsd_t *tsd, const void *ptr); -void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata, +size_t p2rz(tsdn_t *tsdn, const void *ptr); +void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, bool slow_path); void idalloc(tsd_t *tsd, void *ptr); void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path); -void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, +void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, bool slow_path); void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path); @@ -923,7 +923,7 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena); void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero); -bool ixalloc(tsd_t *tsd, void *ptr, size_t 
oldsize, size_t size, +bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero); #endif @@ -939,22 +939,23 @@ iaalloc(const void *ptr) /* * Typical usage: + * tsdn_t *tsdn = [...] * void *ptr = [...] - * size_t sz = isalloc(ptr, config_prof); + * size_t sz = isalloc(tsdn, ptr, config_prof); */ JEMALLOC_ALWAYS_INLINE size_t -isalloc(tsd_t *tsd, const void *ptr, bool demote) +isalloc(tsdn_t *tsdn, const void *ptr, bool demote) { assert(ptr != NULL); /* Demotion only makes sense if config_prof is true. */ assert(config_prof || !demote); - return (arena_salloc(tsd, ptr, demote)); + return (arena_salloc(tsdn, ptr, demote)); } JEMALLOC_ALWAYS_INLINE void * -iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache, +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path) { void *ret; @@ -963,10 +964,10 @@ iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache, assert(!is_metadata || tcache == NULL); assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path); + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsd, ret, - config_prof)); + arena_metadata_allocated_add(iaalloc(ret), + isalloc(tsdn, ret, config_prof)); } return (ret); } @@ -975,12 +976,12 @@ JEMALLOC_ALWAYS_INLINE void * ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { - return (iallocztm(tsd, size, ind, zero, tcache_get(tsd, true), false, - NULL, slow_path)); + return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true), + false, NULL, slow_path)); } JEMALLOC_ALWAYS_INLINE void * -ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, bool is_metadata, arena_t *arena) { void *ret; @@ -990,33 +991,33 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero, assert(!is_metadata || tcache == NULL); assert(!is_metadata || arena == NULL || arena->ind < narenas_auto); - ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache); + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); if (config_stats && is_metadata && likely(ret != NULL)) { - arena_metadata_allocated_add(iaalloc(ret), isalloc(tsd, ret, + arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret, config_prof)); } return (ret); } JEMALLOC_ALWAYS_INLINE void * -ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) { - return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena)); + return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena)); } JEMALLOC_ALWAYS_INLINE void * ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { - return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true), - false, NULL)); + return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd, true), false, NULL)); } JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(tsd_t *tsd, const void *ptr, bool demote) +ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote) { extent_node_t *node; @@ -1028,7 +1029,7 @@ ivsalloc(tsd_t *tsd, const void *ptr, bool demote) 
assert(extent_node_addr_get(node) == ptr || extent_node_achunk_get(node)); - return (isalloc(tsd, ptr, demote)); + return (isalloc(tsdn, ptr, demote)); } JEMALLOC_INLINE size_t @@ -1046,15 +1047,15 @@ u2rz(size_t usize) } JEMALLOC_INLINE size_t -p2rz(tsd_t *tsd, const void *ptr) +p2rz(tsdn_t *tsdn, const void *ptr) { - size_t usize = isalloc(tsd, ptr, false); + size_t usize = isalloc(tsdn, ptr, false); return (u2rz(usize)); } JEMALLOC_ALWAYS_INLINE void -idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata, +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata, bool slow_path) { @@ -1062,18 +1063,18 @@ idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata, assert(!is_metadata || tcache == NULL); assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto); if (config_stats && is_metadata) { - arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsd, ptr, + arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr, config_prof)); } - arena_dalloc(tsd, ptr, tcache, slow_path); + arena_dalloc(tsdn, ptr, tcache, slow_path); } JEMALLOC_ALWAYS_INLINE void idalloc(tsd_t *tsd, void *ptr) { - idalloctm(tsd, ptr, tcache_get(tsd, false), false, true); + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true); } JEMALLOC_ALWAYS_INLINE void @@ -1083,14 +1084,15 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) if (slow_path && config_fill && unlikely(opt_quarantine)) quarantine(tsd, ptr); else - idalloctm(tsd, ptr, tcache, false, slow_path); + idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path); } JEMALLOC_ALWAYS_INLINE void -isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + bool slow_path) { - arena_sdalloc(tsd, ptr, size, tcache, slow_path); + arena_sdalloc(tsdn, ptr, size, tcache, slow_path); } JEMALLOC_ALWAYS_INLINE void @@ -1100,7 +1102,7 @@ isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path) if (slow_path && config_fill && unlikely(opt_quarantine)) quarantine(tsd, ptr); else - isdalloct(tsd, ptr, size, tcache, slow_path); + isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path); } JEMALLOC_ALWAYS_INLINE void * @@ -1113,7 +1115,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, usize = sa2u(size + extra, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - p = ipalloct(tsd, usize, alignment, zero, tcache, arena); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena); if (p == NULL) { if (extra == 0) return (NULL); @@ -1121,7 +1123,8 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, usize = sa2u(size, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - p = ipalloct(tsd, usize, alignment, zero, tcache, arena); + p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, + arena); if (p == NULL) return (NULL); } @@ -1167,7 +1170,7 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, } JEMALLOC_ALWAYS_INLINE bool -ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, +ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment, bool zero) { @@ -1180,7 +1183,7 @@ ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra, return (true); } - return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero)); + return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero)); } #endif diff 
--git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h index 5ddae11c..00f0b91c 100644 --- a/include/jemalloc/internal/mutex.h +++ b/include/jemalloc/internal/mutex.h @@ -59,9 +59,9 @@ extern bool isthreaded; bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank); -void malloc_mutex_prefork(tsd_t *tsd, malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(tsd_t *tsd, malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(tsd_t *tsd, malloc_mutex_t *mutex); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); bool malloc_mutex_boot(void); #endif /* JEMALLOC_H_EXTERNS */ @@ -69,20 +69,20 @@ bool malloc_mutex_boot(void); #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(tsd_t *tsd, malloc_mutex_t *mutex); -void malloc_mutex_unlock(tsd_t *tsd, malloc_mutex_t *mutex); -void malloc_mutex_assert_owner(tsd_t *tsd, malloc_mutex_t *mutex); -void malloc_mutex_assert_not_owner(tsd_t *tsd, malloc_mutex_t *mutex); +void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE void -malloc_mutex_lock(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) { if (config_debug) - witness_assert_not_owner(tsd, &mutex->witness); + witness_assert_not_owner(tsdn, &mutex->witness); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 AcquireSRWLockExclusive(&mutex->lock); @@ -95,17 +95,17 @@ malloc_mutex_lock(tsd_t *tsd, malloc_mutex_t *mutex) pthread_mutex_lock(&mutex->lock); #endif if (config_debug) - witness_lock(tsd, &mutex->witness); + witness_lock(tsdn, &mutex->witness); } } JEMALLOC_INLINE void -malloc_mutex_unlock(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) { if (config_debug) - witness_unlock(tsd, &mutex->witness); + witness_unlock(tsdn, &mutex->witness); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 ReleaseSRWLockExclusive(&mutex->lock); @@ -121,19 +121,19 @@ malloc_mutex_unlock(tsd_t *tsd, malloc_mutex_t *mutex) } JEMALLOC_INLINE void -malloc_mutex_assert_owner(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded && config_debug) - witness_assert_owner(tsd, &mutex->witness); + witness_assert_owner(tsdn, &mutex->witness); } JEMALLOC_INLINE void -malloc_mutex_assert_not_owner(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded && config_debug) - witness_assert_not_owner(tsd, &mutex->witness); + witness_assert_not_owner(tsdn, &mutex->witness); } #endif diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt index 28996206..f2b6a55d 100644 --- a/include/jemalloc/internal/private_symbols.txt +++ b/include/jemalloc/internal/private_symbols.txt @@ -496,8 +496,6 @@ tcache_alloc_easy tcache_alloc_large tcache_alloc_small tcache_alloc_small_hard -tcache_arena_associate -tcache_arena_dissociate tcache_arena_reassociate tcache_bin_flush_large tcache_bin_flush_small @@ -543,6 +541,7 @@ tsd_boot 
tsd_boot0 tsd_boot1 tsd_booted +tsd_booted_get tsd_cleanup tsd_cleanup_wrapper tsd_fetch @@ -581,12 +580,16 @@ tsd_thread_deallocated_set tsd_thread_deallocatedp_get tsd_tls tsd_tsd +tsd_tsdn tsd_witness_fork_get tsd_witness_fork_set tsd_witness_forkp_get tsd_witnesses_get tsd_witnesses_set tsd_witnessesp_get +tsdn_fetch +tsdn_null +tsdn_tsd u2rz valgrind_freelike_block valgrind_make_mem_defined diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h index 4fe17875..691e153d 100644 --- a/include/jemalloc/internal/prof.h +++ b/include/jemalloc/internal/prof.h @@ -281,7 +281,7 @@ extern uint64_t prof_interval; extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); -void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize, +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); @@ -293,33 +293,33 @@ size_t prof_bt_count(void); const prof_cnt_t *prof_cnt_all(void); typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *prof_dump_open; -typedef bool (prof_dump_header_t)(tsd_t *, bool, const prof_cnt_t *); +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); extern prof_dump_header_t *prof_dump_header; #endif -void prof_idump(tsd_t *tsd); +void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); -void prof_gdump(tsd_t *tsd); -prof_tdata_t *prof_tdata_init(tsd_t *tsd); +void prof_gdump(tsdn_t *tsdn); +prof_tdata_t *prof_tdata_init(tsdn_t *tsdn); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); -void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_reset(tsdn_t *tsdn, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); -bool prof_active_get(tsd_t *tsd); -bool prof_active_set(tsd_t *tsd, bool active); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(tsd_t *tsd); bool prof_thread_active_set(tsd_t *tsd, bool active); -bool prof_thread_active_init_get(tsd_t *tsd); -bool prof_thread_active_init_set(tsd_t *tsd, bool active_init); -bool prof_gdump_get(tsd_t *tsd); -bool prof_gdump_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); -bool prof_boot2(tsd_t *tsd); -void prof_prefork0(tsd_t *tsd); -void prof_prefork1(tsd_t *tsd); -void prof_postfork_parent(tsd_t *tsd); -void prof_postfork_child(tsd_t *tsd); +bool prof_boot2(tsdn_t *tsdn); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); #endif /* JEMALLOC_H_EXTERNS */ @@ -330,16 +330,16 @@ void prof_sample_threshold_update(prof_tdata_t *tdata); bool prof_active_get_unlocked(void); bool prof_gdump_get_unlocked(void); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); -prof_tctx_t *prof_tctx_get(tsd_t *tsd, const void *ptr); -void prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize, +prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); +void prof_tctx_set(tsdn_t 
*tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); -void prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize, +void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *tctx); bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, prof_tdata_t **tdata_out); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update); -void prof_malloc(tsd_t *tsd, const void *ptr, size_t usize, +void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, @@ -384,7 +384,7 @@ prof_tdata_get(tsd_t *tsd, bool create) if (create) { if (unlikely(tdata == NULL)) { if (tsd_nominal(tsd)) { - tdata = prof_tdata_init(tsd); + tdata = prof_tdata_init(tsd_tsdn(tsd)); tsd_prof_tdata_set(tsd, tdata); } } else if (unlikely(tdata->expired)) { @@ -398,34 +398,34 @@ prof_tdata_get(tsd_t *tsd, bool create) } JEMALLOC_ALWAYS_INLINE prof_tctx_t * -prof_tctx_get(tsd_t *tsd, const void *ptr) +prof_tctx_get(tsdn_t *tsdn, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - return (arena_prof_tctx_get(tsd, ptr)); + return (arena_prof_tctx_get(tsdn, ptr)); } JEMALLOC_ALWAYS_INLINE void -prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); - arena_prof_tctx_set(tsd, ptr, usize, tctx); + arena_prof_tctx_set(tsdn, ptr, usize, tctx); } JEMALLOC_ALWAYS_INLINE void -prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize, const void *old_ptr, +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); - arena_prof_tctx_reset(tsd, ptr, usize, old_ptr, old_tctx); + arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); } JEMALLOC_ALWAYS_INLINE bool @@ -480,17 +480,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) } JEMALLOC_ALWAYS_INLINE void -prof_malloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); - assert(usize == isalloc(tsd, ptr, true)); + assert(usize == isalloc(tsdn, ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) - prof_malloc_sample_object(tsd, ptr, usize, tctx); + prof_malloc_sample_object(tsdn, ptr, usize, tctx); else - prof_tctx_set(tsd, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); + prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void @@ -504,7 +504,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { - assert(usize == isalloc(tsd, ptr, true)); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. 
The usize passed to prof_alloc_prep() @@ -521,9 +521,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); if (unlikely(sampled)) - prof_malloc_sample_object(tsd, ptr, usize, tctx); + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); else - prof_tctx_reset(tsd, ptr, usize, old_ptr, old_tctx); + prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); if (unlikely(old_sampled)) prof_free_sampled_object(tsd, old_usize, old_tctx); @@ -532,10 +532,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize) { - prof_tctx_t *tctx = prof_tctx_get(tsd, ptr); + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); cassert(config_prof); - assert(usize == isalloc(tsd, ptr, true)); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_free_sampled_object(tsd, usize, tctx); diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h index 59f60235..70883b1a 100644 --- a/include/jemalloc/internal/tcache.h +++ b/include/jemalloc/internal/tcache.h @@ -130,27 +130,25 @@ extern size_t tcache_maxclass; */ extern tcaches_t *tcaches; -size_t tcache_salloc(tsd_t *tsd, const void *ptr); +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); -void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache, +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); -void tcache_arena_associate(tsd_t *tsd, tcache_t *tcache, arena_t *arena); -void tcache_arena_reassociate(tsd_t *tsd, tcache_t *tcache, +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, arena_t *newarena); -void tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena); tcache_t *tcache_get_hard(tsd_t *tsd); -tcache_t *tcache_create(tsd_t *tsd, arena_t *arena); +tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); void tcache_cleanup(tsd_t *tsd); void tcache_enabled_cleanup(tsd_t *tsd); -void tcache_stats_merge(tsd_t *tsd, tcache_t *tcache, arena_t *arena); -bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsdn_t *tsdn, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); -bool tcache_boot(tsd_t *tsd); +bool tcache_boot(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ @@ -297,8 +295,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, if (unlikely(arena == NULL)) return (NULL); - ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind, - &tcache_hard_success); + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + tbin, binind, &tcache_hard_success); if (tcache_hard_success == false) return (NULL); } @@ -310,7 +308,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = index2size(binind); - 
assert(tcache_salloc(tsd, ret) == usize); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); } if (likely(!zero)) { @@ -358,7 +356,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, if (unlikely(arena == NULL)) return (NULL); - ret = arena_malloc_large(tsd, arena, binind, zero); + ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); if (ret == NULL) return (NULL); } else { @@ -407,7 +405,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; - assert(tcache_salloc(tsd, ptr) <= SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); @@ -434,8 +432,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(tsd, ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(tsd, ptr) <= tcache_maxclass); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); binind = size2index(size); @@ -460,7 +458,8 @@ tcaches_get(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; if (unlikely(elm->tcache == NULL)) { - elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL)); + elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, + NULL)); } return (elm->tcache); } diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h index 4a99ee6e..bf113411 100644 --- a/include/jemalloc/internal/tsd.h +++ b/include/jemalloc/internal/tsd.h @@ -13,6 +13,9 @@ typedef struct tsd_init_head_s tsd_init_head_t; #endif typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; + +#define TSDN_NULL ((tsdn_t *)0) typedef enum { tsd_state_uninitialized, @@ -44,6 +47,7 @@ typedef enum { * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} + * bool example_tsd_booted_get(void) {...} * example_t *example_tsd_get() {...} * void example_tsd_set(example_t *val) {...} * @@ -98,6 +102,8 @@ a_attr void \ a_name##tsd_boot1(void); \ a_attr bool \ a_name##tsd_boot(void); \ +a_attr bool \ +a_name##tsd_booted_get(void); \ a_attr a_type * \ a_name##tsd_get(void); \ a_attr void \ @@ -201,6 +207,12 @@ a_name##tsd_boot(void) \ \ return (a_name##tsd_boot0()); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(void) \ @@ -246,6 +258,12 @@ a_name##tsd_boot(void) \ \ return (a_name##tsd_boot0()); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(void) \ @@ -368,6 +386,12 @@ a_name##tsd_boot(void) \ a_name##tsd_boot1(); \ return (false); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(void) \ @@ -490,6 +514,12 @@ a_name##tsd_boot(void) \ a_name##tsd_boot1(); \ return (false); \ } \ +a_attr bool \ +a_name##tsd_booted_get(void) \ +{ \ + \ + return (a_name##tsd_booted); \ +} \ /* Get/set. 
*/ \ a_attr a_type * \ a_name##tsd_get(void) \ @@ -571,6 +601,15 @@ MALLOC_TSD #undef O }; +/* + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. + */ +struct tsdn_s { + tsd_t tsd; +}; + static const tsd_t tsd_initializer = TSD_INITIALIZER; malloc_tsd_types(, tsd_t) @@ -601,6 +640,7 @@ void tsd_cleanup(void *arg); malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) tsd_t *tsd_fetch(void); +tsdn_t *tsd_tsdn(tsd_t *tsd); bool tsd_nominal(tsd_t *tsd); #define O(n, t) \ t *tsd_##n##p_get(tsd_t *tsd); \ @@ -608,6 +648,9 @@ t tsd_##n##_get(tsd_t *tsd); \ void tsd_##n##_set(tsd_t *tsd, t n); MALLOC_TSD #undef O +tsdn_t *tsdn_fetch(void); +bool tsdn_null(const tsdn_t *tsdn); +tsd_t *tsdn_tsd(tsdn_t *tsdn); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) @@ -634,6 +677,13 @@ tsd_fetch(void) return (tsd); } +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsd_tsdn(tsd_t *tsd) +{ + + return ((tsdn_t *)tsd); +} + JEMALLOC_INLINE bool tsd_nominal(tsd_t *tsd) { @@ -665,6 +715,32 @@ tsd_##n##_set(tsd_t *tsd, t n) \ } MALLOC_TSD #undef O + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsdn_fetch(void) +{ + + if (!tsd_booted_get()) + return (NULL); + + return (tsd_tsdn(tsd_fetch())); +} + +JEMALLOC_ALWAYS_INLINE bool +tsdn_null(const tsdn_t *tsdn) +{ + + return (tsdn == NULL); +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsdn_tsd(tsdn_t *tsdn) +{ + + assert(!tsdn_null(tsdn)); + + return (&tsdn->tsd); +} #endif #endif /* JEMALLOC_H_INLINES */ diff --git a/include/jemalloc/internal/valgrind.h b/include/jemalloc/internal/valgrind.h index 2667bf5e..1a868082 100644 --- a/include/jemalloc/internal/valgrind.h +++ b/include/jemalloc/internal/valgrind.h @@ -30,17 +30,17 @@ * calls must be embedded in macros rather than in functions so that when * Valgrind reports errors, there are no extra stack frames in the backtraces. 
*/ -#define JEMALLOC_VALGRIND_MALLOC(cond, tsd, ptr, usize, zero) do { \ +#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \ if (unlikely(in_valgrind && cond)) { \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsd, ptr), \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \ zero); \ } \ } while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsd, ptr, usize, \ +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do { \ if (unlikely(in_valgrind)) { \ - size_t rzsize = p2rz(tsd, ptr); \ + size_t rzsize = p2rz(tsdn, ptr); \ \ if (!maybe_moved || ptr == old_ptr) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ @@ -83,8 +83,8 @@ #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, tsd, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsd, ptr, usize, \ +#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) +#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do {} while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h index b2e6e825..4d312eab 100644 --- a/include/jemalloc/internal/witness.h +++ b/include/jemalloc/internal/witness.h @@ -75,23 +75,23 @@ void witness_init(witness_t *witness, const char *name, witness_rank_t rank, typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); extern witness_lock_error_t *witness_lock_error; #endif -void witness_lock(tsd_t *tsd, witness_t *witness); -void witness_unlock(tsd_t *tsd, witness_t *witness); +void witness_lock(tsdn_t *tsdn, witness_t *witness); +void witness_unlock(tsdn_t *tsdn, witness_t *witness); #ifdef JEMALLOC_JET typedef void (witness_owner_error_t)(const witness_t *); extern witness_owner_error_t *witness_owner_error; #endif -void witness_assert_owner(tsd_t *tsd, const witness_t *witness); +void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness); #ifdef JEMALLOC_JET typedef void (witness_not_owner_error_t)(const witness_t *); extern witness_not_owner_error_t *witness_not_owner_error; #endif -void witness_assert_not_owner(tsd_t *tsd, const witness_t *witness); +void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness); #ifdef JEMALLOC_JET typedef void (witness_lockless_error_t)(const witness_list_t *); extern witness_lockless_error_t *witness_lockless_error; #endif -void witness_assert_lockless(tsd_t *tsd); +void witness_assert_lockless(tsdn_t *tsdn); void witnesses_cleanup(tsd_t *tsd); void witness_fork_cleanup(tsd_t *tsd); diff --git a/src/arena.c b/src/arena.c index 992d96f5..c605bcd3 100644 --- a/src/arena.c +++ b/src/arena.c @@ -37,11 +37,11 @@ static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */ * definition. 
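The witness API now takes the nullable form as well, so lock-order verification degrades gracefully during early bootstrapping. A hypothetical use (example_critical_section() is illustrative; the sketch assumes the witness calls become no-ops when tsdn is NULL, which is the point of making them accept tsdn_t *):

static void
example_critical_section(tsdn_t *tsdn, witness_t *witness)
{

	witness_assert_not_owner(tsdn, witness);
	witness_lock(tsdn, witness);
	/* ... operations ordered by the witnessed rank ... */
	witness_unlock(tsdn, witness);
	/* Assert that this thread holds no witnessed locks at all. */
	witness_assert_lockless(tsdn);
}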
*/ -static void arena_purge_to_limit(tsd_t *tsd, arena_t *arena, +static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit); -static void arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, +static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, bool decommitted); -static void arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena, +static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); @@ -592,7 +592,7 @@ arena_chunk_init_spare(arena_t *arena) } static bool -arena_chunk_register(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, bool zero) { @@ -604,61 +604,62 @@ arena_chunk_register(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, */ extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true); extent_node_achunk_set(&chunk->node, true); - return (chunk_register(tsd, chunk, &chunk->node)); + return (chunk_register(tsdn, chunk, &chunk->node)); } static arena_chunk_t * -arena_chunk_alloc_internal_hard(tsd_t *tsd, arena_t *arena, +arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool *zero, bool *commit) { arena_chunk_t *chunk; - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); - chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsd, arena, chunk_hooks, + chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize, chunksize, zero, commit); if (chunk != NULL && !*commit) { /* Commit header. */ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind)) { - chunk_dalloc_wrapper(tsd, arena, chunk_hooks, + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, chunksize, *zero, *commit); chunk = NULL; } } - if (chunk != NULL && arena_chunk_register(tsd, arena, chunk, *zero)) { + if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) { if (!*commit) { /* Undo commit of header. 
*/ chunk_hooks->decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } - chunk_dalloc_wrapper(tsd, arena, chunk_hooks, (void *)chunk, + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk, chunksize, *zero, *commit); chunk = NULL; } - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); return (chunk); } static arena_chunk_t * -arena_chunk_alloc_internal(tsd_t *tsd, arena_t *arena, bool *zero, bool *commit) +arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero, + bool *commit) { arena_chunk_t *chunk; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; - chunk = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, chunksize, + chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize, chunksize, zero, true); if (chunk != NULL) { - if (arena_chunk_register(tsd, arena, chunk, *zero)) { - chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk, + if (arena_chunk_register(tsdn, arena, chunk, *zero)) { + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, chunksize, true); return (NULL); } *commit = true; } if (chunk == NULL) { - chunk = arena_chunk_alloc_internal_hard(tsd, arena, + chunk = arena_chunk_alloc_internal_hard(tsdn, arena, &chunk_hooks, zero, commit); } @@ -671,7 +672,7 @@ arena_chunk_alloc_internal(tsd_t *tsd, arena_t *arena, bool *zero, bool *commit) } static arena_chunk_t * -arena_chunk_init_hard(tsd_t *tsd, arena_t *arena) +arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena) { arena_chunk_t *chunk; bool zero, commit; @@ -681,7 +682,7 @@ arena_chunk_init_hard(tsd_t *tsd, arena_t *arena) zero = false; commit = false; - chunk = arena_chunk_alloc_internal(tsd, arena, &zero, &commit); + chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit); if (chunk == NULL) return (NULL); @@ -726,14 +727,14 @@ arena_chunk_init_hard(tsd_t *tsd, arena_t *arena) } static arena_chunk_t * -arena_chunk_alloc(tsd_t *tsd, arena_t *arena) +arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena) { arena_chunk_t *chunk; if (arena->spare != NULL) chunk = arena_chunk_init_spare(arena); else { - chunk = arena_chunk_init_hard(tsd, arena); + chunk = arena_chunk_init_hard(tsdn, arena); if (chunk == NULL) return (NULL); } @@ -746,7 +747,7 @@ arena_chunk_alloc(tsd_t *tsd, arena_t *arena) } static void -arena_chunk_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) +arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) { bool committed; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; @@ -761,12 +762,12 @@ arena_chunk_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) * chunk as committed has a high potential for causing later * access of decommitted memory. 
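A recurring shape in the *_hard() helpers here: the arena lock is dropped around potentially slow chunk-hook calls and reacquired before returning, so callers' locking assumptions hold on every path. Schematically (example_alloc_hard() and expensive_chunk_op() are stand-ins, not real functions):

static void *
example_alloc_hard(tsdn_t *tsdn, arena_t *arena)
{
	void *ret;

	malloc_mutex_unlock(tsdn, &arena->lock);
	/* Chunk hooks may be slow or reentrant; don't hold arena->lock. */
	ret = expensive_chunk_op(arena);
	malloc_mutex_lock(tsdn, &arena->lock);
	return (ret);
}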
*/ - chunk_hooks = chunk_hooks_get(tsd, arena); + chunk_hooks = chunk_hooks_get(tsdn, arena); chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE, arena->ind); } - chunk_dalloc_cache(tsd, arena, &chunk_hooks, (void *)chunk, chunksize, + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize, committed); if (config_stats) { @@ -776,7 +777,7 @@ arena_chunk_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) } static void -arena_spare_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *spare) +arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare) { assert(arena->spare != spare); @@ -786,11 +787,11 @@ arena_spare_discard(tsd_t *tsd, arena_t *arena, arena_chunk_t *spare) chunk_npages-map_bias); } - arena_chunk_discard(tsd, arena, spare); + arena_chunk_discard(tsdn, arena, spare); } static void -arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) +arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk) { arena_chunk_t *spare; @@ -812,7 +813,7 @@ arena_chunk_dalloc(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) spare = arena->spare; arena->spare = chunk; if (spare != NULL) - arena_spare_discard(tsd, arena, spare); + arena_spare_discard(tsdn, arena, spare); } static void @@ -896,64 +897,64 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize, } extent_node_t * -arena_node_alloc(tsd_t *tsd, arena_t *arena) +arena_node_alloc(tsdn_t *tsdn, arena_t *arena) { extent_node_t *node; - malloc_mutex_lock(tsd, &arena->node_cache_mtx); + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); node = ql_last(&arena->node_cache, ql_link); if (node == NULL) { - malloc_mutex_unlock(tsd, &arena->node_cache_mtx); - return (base_alloc(tsd, sizeof(extent_node_t))); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); + return (base_alloc(tsdn, sizeof(extent_node_t))); } ql_tail_remove(&arena->node_cache, extent_node_t, ql_link); - malloc_mutex_unlock(tsd, &arena->node_cache_mtx); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); return (node); } void -arena_node_dalloc(tsd_t *tsd, arena_t *arena, extent_node_t *node) +arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node) { - malloc_mutex_lock(tsd, &arena->node_cache_mtx); + malloc_mutex_lock(tsdn, &arena->node_cache_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->node_cache, node, ql_link); - malloc_mutex_unlock(tsd, &arena->node_cache_mtx); + malloc_mutex_unlock(tsdn, &arena->node_cache_mtx); } static void * -arena_chunk_alloc_huge_hard(tsd_t *tsd, arena_t *arena, +arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero, size_t csize) { void *ret; bool commit = true; - ret = chunk_alloc_wrapper(tsd, arena, chunk_hooks, NULL, csize, + ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize, alignment, zero, &commit); if (ret == NULL) { /* Revert optimistic stats updates. 
*/ - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_malloc_stats_update_undo(arena, usize); arena->stats.mapped -= usize; } arena_nactive_sub(arena, usize >> LG_PAGE); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } return (ret); } void * -arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize, +arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool *zero) { void *ret; chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize = CHUNK_CEILING(usize); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); /* Optimistically update stats. */ if (config_stats) { @@ -962,11 +963,11 @@ arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize, } arena_nactive_add(arena, usize >> LG_PAGE); - ret = chunk_alloc_cache(tsd, arena, &chunk_hooks, NULL, csize, + ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize, alignment, zero, true); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (ret == NULL) { - ret = arena_chunk_alloc_huge_hard(tsd, arena, &chunk_hooks, + ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks, usize, alignment, zero, csize); } @@ -974,49 +975,49 @@ arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize, } void -arena_chunk_dalloc_huge(tsd_t *tsd, arena_t *arena, void *chunk, size_t usize) +arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize) { chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER; size_t csize; csize = CHUNK_CEILING(usize); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_dalloc_stats_update(arena, usize); arena->stats.mapped -= usize; } arena_nactive_sub(arena, usize >> LG_PAGE); - chunk_dalloc_cache(tsd, arena, &chunk_hooks, chunk, csize, true); - malloc_mutex_unlock(tsd, &arena->lock); + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_chunk_ralloc_huge_similar(tsd_t *tsd, arena_t *arena, void *chunk, +arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize) { assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize)); assert(oldsize != usize); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) arena_huge_ralloc_stats_update(arena, oldsize, usize); if (oldsize < usize) arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE); else arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk, +arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize) { size_t udiff = oldsize - usize; size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_ralloc_stats_update(arena, oldsize, usize); if (cdiff != 0) @@ -1029,35 +1030,35 @@ arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk, void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(usize)); - chunk_dalloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff, + chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, true); } - malloc_mutex_unlock(tsd, &arena->lock); + 
malloc_mutex_unlock(tsdn, &arena->lock); } static bool -arena_chunk_ralloc_huge_expand_hard(tsd_t *tsd, arena_t *arena, +arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff, size_t cdiff) { bool err; bool commit = true; - err = (chunk_alloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff, + err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, chunksize, zero, &commit) == NULL); if (err) { /* Revert optimistic stats updates. */ - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena_huge_ralloc_stats_update_undo(arena, oldsize, usize); arena->stats.mapped -= cdiff; } arena_nactive_sub(arena, udiff >> LG_PAGE); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_wrapper(tsd, arena, chunk_hooks, nchunk, cdiff, + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff, *zero, true); err = true; } @@ -1065,16 +1066,16 @@ arena_chunk_ralloc_huge_expand_hard(tsd_t *tsd, arena_t *arena, } bool -arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk, +arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t oldsize, size_t usize, bool *zero) { bool err; - chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena); + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize)); size_t udiff = usize - oldsize; size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); /* Optimistically update stats. */ if (config_stats) { @@ -1083,16 +1084,16 @@ arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk, } arena_nactive_add(arena, udiff >> LG_PAGE); - err = (chunk_alloc_cache(tsd, arena, &chunk_hooks, nchunk, cdiff, + err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff, chunksize, zero, true) == NULL); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (err) { - err = arena_chunk_ralloc_huge_expand_hard(tsd, arena, + err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena, &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff, cdiff); } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk, cdiff, true, arena->ind)) { - chunk_dalloc_wrapper(tsd, arena, &chunk_hooks, nchunk, cdiff, + chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff, *zero, true); err = true; } @@ -1133,7 +1134,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) } static arena_run_t * -arena_run_alloc_large(tsd_t *tsd, arena_t *arena, size_t size, bool zero) +arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero) { arena_chunk_t *chunk; arena_run_t *run; @@ -1149,7 +1150,7 @@ arena_run_alloc_large(tsd_t *tsd, arena_t *arena, size_t size, bool zero) /* * No usable runs. Create a new chunk from which to allocate the run. 
*/ - chunk = arena_chunk_alloc(tsd, arena); + chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { run = &arena_miscelm_get_mutable(chunk, map_bias)->run; if (arena_run_split_large(arena, run, size, zero)) @@ -1177,7 +1178,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind) } static arena_run_t * -arena_run_alloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind) +arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind) { arena_chunk_t *chunk; arena_run_t *run; @@ -1194,7 +1195,7 @@ arena_run_alloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind) /* * No usable runs. Create a new chunk from which to allocate the run. */ - chunk = arena_chunk_alloc(tsd, arena); + chunk = arena_chunk_alloc(tsdn, arena); if (chunk != NULL) { run = &arena_miscelm_get_mutable(chunk, map_bias)->run; if (arena_run_split_small(arena, run, size, binind)) @@ -1219,28 +1220,28 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult) } ssize_t -arena_lg_dirty_mult_get(tsd_t *tsd, arena_t *arena) +arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena) { ssize_t lg_dirty_mult; - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); lg_dirty_mult = arena->lg_dirty_mult; - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (lg_dirty_mult); } bool -arena_lg_dirty_mult_set(tsd_t *tsd, arena_t *arena, ssize_t lg_dirty_mult) +arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult) { if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) return (true); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena->lg_dirty_mult = lg_dirty_mult; - arena_maybe_purge(tsd, arena); - malloc_mutex_unlock(tsd, &arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } @@ -1397,25 +1398,25 @@ arena_decay_time_valid(ssize_t decay_time) } ssize_t -arena_decay_time_get(tsd_t *tsd, arena_t *arena) +arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) { ssize_t decay_time; - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); decay_time = arena->decay_time; - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (decay_time); } bool -arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time) +arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) { if (!arena_decay_time_valid(decay_time)) return (true); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); /* * Restart decay backlog from scratch, which may cause many dirty pages * to be immediately purged. It would conceptually be possible to map @@ -1425,14 +1426,14 @@ arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time) * arbitrary change during initial arena configuration. 
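As a concrete reading of the knob handled above and consumed by arena_maybe_purge_ratio() just below: with opt_purge == purge_mode_ratio, the purge threshold is roughly nactive >> lg_dirty_mult, so the default lg_dirty_mult of 3 starts purging once dirty pages exceed about one eighth of active pages, while a lg_dirty_mult of -1 disables ratio-based purging entirely.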
*/ arena_decay_init(arena, decay_time); - arena_maybe_purge(tsd, arena); - malloc_mutex_unlock(tsd, &arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } static void -arena_maybe_purge_ratio(tsd_t *tsd, arena_t *arena) +arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena) { assert(opt_purge == purge_mode_ratio); @@ -1455,12 +1456,12 @@ arena_maybe_purge_ratio(tsd_t *tsd, arena_t *arena) */ if (arena->ndirty <= threshold) return; - arena_purge_to_limit(tsd, arena, threshold); + arena_purge_to_limit(tsdn, arena, threshold); } } static void -arena_maybe_purge_decay(tsd_t *tsd, arena_t *arena) +arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena) { nstime_t time; size_t ndirty_limit; @@ -1470,7 +1471,7 @@ arena_maybe_purge_decay(tsd_t *tsd, arena_t *arena) /* Purge all or nothing if the option is disabled. */ if (arena->decay_time <= 0) { if (arena->decay_time == 0) - arena_purge_to_limit(tsd, arena, 0); + arena_purge_to_limit(tsdn, arena, 0); return; } @@ -1491,11 +1492,11 @@ arena_maybe_purge_decay(tsd_t *tsd, arena_t *arena) */ if (arena->ndirty <= ndirty_limit) return; - arena_purge_to_limit(tsd, arena, ndirty_limit); + arena_purge_to_limit(tsdn, arena, ndirty_limit); } void -arena_maybe_purge(tsd_t *tsd, arena_t *arena) +arena_maybe_purge(tsdn_t *tsdn, arena_t *arena) { /* Don't recursively purge. */ @@ -1503,9 +1504,9 @@ arena_maybe_purge(tsd_t *tsd, arena_t *arena) return; if (opt_purge == purge_mode_ratio) - arena_maybe_purge_ratio(tsd, arena); + arena_maybe_purge_ratio(tsdn, arena); else - arena_maybe_purge_decay(tsd, arena); + arena_maybe_purge_decay(tsdn, arena); } static size_t @@ -1543,7 +1544,7 @@ arena_dirty_count(arena_t *arena) } static size_t -arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { @@ -1574,7 +1575,7 @@ arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, * dalloc_node=false argument to chunk_alloc_cache(). */ zero = false; - chunk = chunk_alloc_cache(tsd, arena, chunk_hooks, + chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks, extent_node_addr_get(chunkselm), extent_node_size_get(chunkselm), chunksize, &zero, false); @@ -1609,7 +1610,7 @@ arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, * prior to allocation. */ if (chunk == arena->spare) - arena_chunk_alloc(tsd, arena); + arena_chunk_alloc(tsdn, arena); /* Temporarily allocate the free dirty run. 
*/ arena_run_split_large(arena, run, run_size, false); @@ -1633,7 +1634,7 @@ arena_stash_dirty(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, } static size_t -arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { @@ -1645,7 +1646,7 @@ arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, nmadvise = 0; npurged = 0; - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); for (rdelm = qr_next(purge_runs_sentinel, rd_link), chunkselm = qr_next(purge_chunks_sentinel, cc_link); rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) { @@ -1684,7 +1685,7 @@ arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, flag_unzeroed = 0; flags = CHUNK_MAP_DECOMMITTED; } else { - flag_unzeroed = chunk_purge_wrapper(tsd, arena, + flag_unzeroed = chunk_purge_wrapper(tsdn, arena, chunk_hooks, chunk, chunksize, pageind << LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0; flags = flag_unzeroed; @@ -1715,7 +1716,7 @@ arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, if (config_stats) nmadvise++; } - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_stats) { arena->stats.nmadvise += nmadvise; @@ -1726,7 +1727,7 @@ arena_purge_stashed(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, } static void -arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, +arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, arena_runs_dirty_link_t *purge_runs_sentinel, extent_node_t *purge_chunks_sentinel) { @@ -1746,9 +1747,9 @@ arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, bool zeroed = extent_node_zeroed_get(chunkselm); bool committed = extent_node_committed_get(chunkselm); extent_node_dirty_remove(chunkselm); - arena_node_dalloc(tsd, arena, chunkselm); + arena_node_dalloc(tsdn, arena, chunkselm); chunkselm = chunkselm_next; - chunk_dalloc_wrapper(tsd, arena, chunk_hooks, addr, + chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr, size, zeroed, committed); } else { arena_chunk_t *chunk = @@ -1760,7 +1761,7 @@ arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, pageind) != 0); arena_run_t *run = &miscelm->run; qr_remove(rdelm, rd_link); - arena_run_dalloc(tsd, arena, run, false, true, + arena_run_dalloc(tsdn, arena, run, false, true, decommitted); } } @@ -1776,9 +1777,9 @@ arena_unstash_purged(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks, * (arena->ndirty >= ndirty_limit) */ static void -arena_purge_to_limit(tsd_t *tsd, arena_t *arena, size_t ndirty_limit) +arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit) { - chunk_hooks_t chunk_hooks = chunk_hooks_get(tsd, arena); + chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena); size_t npurge, npurged; arena_runs_dirty_link_t purge_runs_sentinel; extent_node_t purge_chunks_sentinel; @@ -1799,14 +1800,14 @@ arena_purge_to_limit(tsd_t *tsd, arena_t *arena, size_t ndirty_limit) qr_new(&purge_runs_sentinel, rd_link); extent_node_dirty_linkage_init(&purge_chunks_sentinel); - npurge = arena_stash_dirty(tsd, arena, &chunk_hooks, ndirty_limit, + npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, &purge_runs_sentinel, &purge_chunks_sentinel); if (npurge == 0) goto label_return; - npurged = 
arena_purge_stashed(tsd, arena, &chunk_hooks, + npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, &purge_chunks_sentinel); assert(npurged == npurge); - arena_unstash_purged(tsd, arena, &chunk_hooks, &purge_runs_sentinel, + arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, &purge_chunks_sentinel); if (config_stats) @@ -1817,15 +1818,15 @@ label_return: } void -arena_purge(tsd_t *tsd, arena_t *arena, bool all) +arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) { - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (all) - arena_purge_to_limit(tsd, arena, 0); + arena_purge_to_limit(tsdn, arena, 0); else - arena_maybe_purge(tsd, arena); - malloc_mutex_unlock(tsd, &arena->lock); + arena_maybe_purge(tsdn, arena); + malloc_mutex_unlock(tsdn, &arena->lock); } static void @@ -1845,7 +1846,8 @@ arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) if (arena_mapbits_large_get(chunk, pageind) != 0) { void *ptr = (void *)((uintptr_t)chunk + (pageind << LG_PAGE)); - size_t usize = isalloc(tsd, ptr, config_prof); + size_t usize = isalloc(tsd_tsdn(tsd), ptr, + config_prof); prof_free(tsd, ptr, usize); npages = arena_mapbits_large_size_get(chunk, @@ -1902,39 +1904,39 @@ arena_reset(tsd_t *tsd, arena_t *arena) } /* Huge allocations. */ - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); for (node = ql_last(&arena->huge, ql_link); node != NULL; node = ql_last(&arena->huge, ql_link)) { void *ptr = extent_node_addr_get(node); size_t usize; - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); if (config_stats || (config_prof && opt_prof)) - usize = isalloc(tsd, ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); /* Remove huge allocation from prof sample set. */ if (config_prof && opt_prof) prof_free(tsd, ptr, usize); - huge_dalloc(tsd, ptr); - malloc_mutex_lock(tsd, &arena->huge_mtx); + huge_dalloc(tsd_tsdn(tsd), ptr); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); /* Cancel out unwanted effects on stats. */ if (config_stats) arena_huge_reset_stats_cancel(arena, usize); } - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); /* Bins. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(tsd, &bin->lock); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->runcur = NULL; arena_run_heap_new(&bin->runs); if (config_stats) { bin->stats.curregs = 0; bin->stats.curruns = 0; } - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } /* @@ -1952,12 +1954,13 @@ arena_reset(tsd_t *tsd, arena_t *arena) for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = ql_last(&arena->achunks, ql_link)) { ql_remove(&arena->achunks, node, ql_link); - arena_chunk_discard(tsd, arena, extent_node_addr_get(node)); + arena_chunk_discard(tsd_tsdn(tsd), arena, + extent_node_addr_get(node)); } /* Spare. 
*/ if (arena->spare != NULL) { - arena_chunk_discard(tsd, arena, arena->spare); + arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); arena->spare = NULL; } @@ -1967,7 +1970,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) for(i = 0; i < runs_avail_nclasses; i++) arena_run_heap_new(&arena->runs_avail[i]); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); } static void @@ -2084,7 +2087,7 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, } static void -arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, bool dirty, +arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, bool decommitted) { arena_chunk_t *chunk; @@ -2145,7 +2148,7 @@ arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, bool dirty, if (size == arena_maxrun) { assert(run_ind == map_bias); assert(run_pages == (arena_maxrun >> LG_PAGE)); - arena_chunk_dalloc(tsd, arena, chunk); + arena_chunk_dalloc(tsdn, arena, chunk); } /* @@ -2156,11 +2159,11 @@ arena_run_dalloc(tsd_t *tsd, arena_t *arena, arena_run_t *run, bool dirty, * chances of spuriously crossing the dirty page purging threshold. */ if (dirty) - arena_maybe_purge(tsd, arena); + arena_maybe_purge(tsdn, arena); } static void -arena_run_trim_head(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); @@ -2196,12 +2199,12 @@ arena_run_trim_head(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind+head_npages))); - arena_run_dalloc(tsd, arena, run, false, false, (flag_decommitted != + arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != 0)); } static void -arena_run_trim_tail(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) { arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); @@ -2241,7 +2244,7 @@ arena_run_trim_tail(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); tail_run = &tail_miscelm->run; - arena_run_dalloc(tsd, arena, tail_run, dirty, false, (flag_decommitted + arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted != 0)); } @@ -2268,7 +2271,7 @@ arena_bin_nonfull_run_tryget(arena_bin_t *bin) } static arena_run_t * -arena_bin_nonfull_run_get(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) +arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) { arena_run_t *run; szind_t binind; @@ -2284,19 +2287,19 @@ arena_bin_nonfull_run_get(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) bin_info = &arena_bin_info[binind]; /* Allocate a new run. */ - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(tsd, &arena->lock); - run = arena_run_alloc_small(tsd, arena, bin_info->run_size, binind); + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); if (run != NULL) { /* Initialize run internals. 
*/ run->binind = binind; run->nfree = bin_info->nregs; bitmap_init(run->bitmap, &bin_info->bitmap_info); } - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); /********************************/ - malloc_mutex_lock(tsd, &bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if (run != NULL) { if (config_stats) { bin->stats.nruns++; @@ -2319,7 +2322,7 @@ arena_bin_nonfull_run_get(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ static void * -arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) { szind_t binind; arena_bin_info_t *bin_info; @@ -2328,7 +2331,7 @@ arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; bin->runcur = NULL; - run = arena_bin_nonfull_run_get(tsd, arena, bin); + run = arena_bin_nonfull_run_get(tsdn, arena, bin); if (bin->runcur != NULL && bin->runcur->nfree > 0) { /* * Another thread updated runcur while this one ran without the @@ -2350,7 +2353,7 @@ arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) */ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); if (run->nfree == bin_info->nregs) { - arena_dalloc_bin_run(tsd, arena, chunk, run, + arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); } else arena_bin_lower_run(arena, chunk, run, bin); @@ -2369,7 +2372,7 @@ arena_bin_malloc_hard(tsd_t *tsd, arena_t *arena, arena_bin_t *bin) } void -arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; @@ -2377,10 +2380,10 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(tsd, arena, prof_accumbytes)) - prof_idump(tsd); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) + prof_idump(tsdn); bin = &arena->bins[binind]; - malloc_mutex_lock(tsd, &bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> tbin->lg_fill_div); i < nfill; i++) { arena_run_t *run; @@ -2388,7 +2391,7 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, if ((run = bin->runcur) != NULL && run->nfree > 0) ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); else - ptr = arena_bin_malloc_hard(tsd, arena, bin); + ptr = arena_bin_malloc_hard(tsdn, arena, bin); if (ptr == NULL) { /* * OOM. 
tbin->avail isn't yet filled down to its first @@ -2415,9 +2418,9 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, bin->stats.nfills++; tbin->tstats.nrequests = 0; } - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); } void @@ -2529,7 +2532,7 @@ arena_quarantine_junk_small(void *ptr, size_t usize) } static void * -arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; arena_bin_t *bin; @@ -2540,14 +2543,14 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) bin = &arena->bins[binind]; usize = index2size(binind); - malloc_mutex_lock(tsd, &bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if ((run = bin->runcur) != NULL && run->nfree > 0) ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); else - ret = arena_bin_malloc_hard(tsd, arena, bin); + ret = arena_bin_malloc_hard(tsdn, arena, bin); if (ret == NULL) { - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); return (NULL); } @@ -2556,9 +2559,9 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) bin->stats.nrequests++; bin->stats.curregs++; } - malloc_mutex_unlock(tsd, &bin->lock); - if (config_prof && !isthreaded && arena_prof_accum(tsd, arena, usize)) - prof_idump(tsd); + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) + prof_idump(tsdn); if (!zero) { if (config_fill) { @@ -2578,12 +2581,12 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) memset(ret, 0, usize); } - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) +arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; size_t usize; @@ -2594,7 +2597,7 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) /* Large allocation. 
*/ usize = index2size(binind); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (config_cache_oblivious) { uint64_t r; @@ -2607,9 +2610,9 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) random_offset = ((uintptr_t)r) << LG_CACHELINE; } else random_offset = 0; - run = arena_run_alloc_large(tsd, arena, usize + large_pad, zero); + run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); if (run == NULL) { - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } miscelm = arena_run_to_miscelm(run); @@ -2627,9 +2630,9 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) } if (config_prof) idump = arena_prof_accum_locked(arena, usize); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (config_prof && idump) - prof_idump(tsd); + prof_idump(tsdn); if (!zero) { if (config_fill) { @@ -2640,29 +2643,32 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) } } - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero) { - arena = arena_choose(tsd, arena); + assert(!tsdn_null(tsdn) || arena != NULL); + + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL)) return (NULL); if (likely(size <= SMALL_MAXCLASS)) - return (arena_malloc_small(tsd, arena, ind, zero)); + return (arena_malloc_small(tsdn, arena, ind, zero)); if (likely(size <= large_maxclass)) - return (arena_malloc_large(tsd, arena, ind, zero)); - return (huge_malloc(tsd, arena, index2size(ind), zero)); + return (arena_malloc_large(tsdn, arena, ind, zero)); + return (huge_malloc(tsdn, arena, index2size(ind), zero)); } /* Only handles large allocations that require more than page alignment. 
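arena_malloc_hard() above is the template for allocation paths that may run without tsd: a NULL tsdn is legal only when the caller pins an arena explicitly, because arena_choose() needs a real tsd. A hypothetical caller exercising both sides of that contract (example_alloc() is illustrative, not part of this change):

void *
example_alloc(tsdn_t *tsdn, arena_t *a0, size_t size)
{

	if (tsdn_null(tsdn)) {
		/* Pre-boot: an arena must be supplied explicitly. */
		assert(a0 != NULL);
		return (arena_malloc_hard(tsdn, a0, size, size2index(size),
		    false));
	}
	/* Booted: arena == NULL lets arena_choose() pick one. */
	return (arena_malloc_hard(tsdn, NULL, size, size2index(size), false));
}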
*/ static void * -arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, +arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { void *ret; @@ -2672,19 +2678,21 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena_chunk_map_misc_t *miscelm; void *rpages; + assert(!tsdn_null(tsdn) || arena != NULL); assert(usize == PAGE_CEILING(usize)); - arena = arena_choose(tsd, arena); + if (likely(!tsdn_null(tsdn))) + arena = arena_choose(tsdn_tsd(tsdn), arena); if (unlikely(arena == NULL)) return (NULL); alignment = PAGE_CEILING(alignment); alloc_size = usize + large_pad + alignment; - malloc_mutex_lock(tsd, &arena->lock); - run = arena_run_alloc_large(tsd, arena, alloc_size, false); + malloc_mutex_lock(tsdn, &arena->lock); + run = arena_run_alloc_large(tsdn, arena, alloc_size, false); if (run == NULL) { - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); @@ -2704,11 +2712,11 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, LG_PAGE)); run = &miscelm->run; - arena_run_trim_head(tsd, arena, chunk, head_run, alloc_size, + arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, alloc_size - leadsize); } if (trailsize != 0) { - arena_run_trim_tail(tsd, arena, chunk, run, usize + large_pad + + arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + trailsize, usize + large_pad, false); } if (arena_run_init_large(arena, run, usize + large_pad, zero)) { @@ -2719,8 +2727,8 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, run_ind) != 0); assert(decommitted); /* Cause of OOM. */ - arena_run_dalloc(tsd, arena, run, dirty, false, decommitted); - malloc_mutex_unlock(tsd, &arena->lock); + arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); + malloc_mutex_unlock(tsdn, &arena->lock); return (NULL); } ret = arena_miscelm_to_rpages(miscelm); @@ -2735,7 +2743,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); if (config_fill && !zero) { if (unlikely(opt_junk_alloc)) @@ -2743,12 +2751,12 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, else if (unlikely(opt_zero)) memset(ret, 0, usize); } - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } void * -arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; @@ -2756,7 +2764,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special run placement. */ - ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true); } else if (usize <= large_maxclass && alignment <= PAGE) { /* @@ -2765,25 +2773,25 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, * the base of the run, so do some bit manipulation to retrieve * the base. 
*/ - ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, + ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, tcache, true); if (config_cache_oblivious) ret = (void *)((uintptr_t)ret & ~PAGE_MASK); } else { if (likely(usize <= large_maxclass)) { - ret = arena_palloc_large(tsd, arena, usize, alignment, + ret = arena_palloc_large(tsdn, arena, usize, alignment, zero); } else if (likely(alignment <= chunksize)) - ret = huge_malloc(tsd, arena, usize, zero); + ret = huge_malloc(tsdn, arena, usize, zero); else { - ret = huge_palloc(tsd, arena, usize, alignment, zero); + ret = huge_palloc(tsdn, arena, usize, alignment, zero); } } return (ret); } void -arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size) +arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) { arena_chunk_t *chunk; size_t pageind; @@ -2792,8 +2800,8 @@ arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size) cassert(config_prof); assert(ptr != NULL); assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS); - assert(isalloc(tsd, ptr, true) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); assert(size <= SMALL_MAXCLASS); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); @@ -2802,8 +2810,8 @@ arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size) assert(binind < NBINS); arena_mapbits_large_binind_set(chunk, pageind, binind); - assert(isalloc(tsd, ptr, false) == LARGE_MINCLASS); - assert(isalloc(tsd, ptr, true) == size); + assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); + assert(isalloc(tsdn, ptr, true) == size); } static void @@ -2834,19 +2842,19 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, } static void -arena_dalloc_bin_run(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin) { assert(run != bin->runcur); - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(tsd, &arena->lock); - arena_run_dalloc(tsd, arena, run, true, false, false); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_dalloc(tsdn, arena, run, true, false, false); + malloc_mutex_unlock(tsdn, &arena->lock); /****************************/ - malloc_mutex_lock(tsd, &bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); if (config_stats) bin->stats.curruns--; } @@ -2873,7 +2881,7 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, } static void -arena_dalloc_bin_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) { size_t pageind, rpages_ind; @@ -2895,7 +2903,7 @@ arena_dalloc_bin_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, arena_run_reg_dalloc(run, ptr); if (run->nfree == bin_info->nregs) { arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(tsd, arena, chunk, run, bin); + arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); } else if (run->nfree == 1 && run != bin->runcur) arena_bin_lower_run(arena, chunk, run, bin); @@ -2906,15 +2914,15 @@ arena_dalloc_bin_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, } void -arena_dalloc_bin_junked_locked(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, - void *ptr, arena_chunk_map_bits_t *bitselm) 
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) { - arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, true); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); } void -arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm) { arena_run_t *run; @@ -2924,14 +2932,14 @@ arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; bin = &arena->bins[run->binind]; - malloc_mutex_lock(tsd, &bin->lock); - arena_dalloc_bin_locked_impl(tsd, arena, chunk, ptr, bitselm, false); - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); + malloc_mutex_unlock(tsdn, &bin->lock); } void -arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) +arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t pageind) { arena_chunk_map_bits_t *bitselm; @@ -2941,8 +2949,8 @@ arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, pageind)) != BININD_INVALID); } bitselm = arena_bitselm_get_mutable(chunk, pageind); - arena_dalloc_bin(tsd, arena, chunk, ptr, pageind, bitselm); - arena_decay_tick(tsd, arena); + arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); + arena_decay_tick(tsdn, arena); } #ifdef JEMALLOC_JET @@ -2964,8 +2972,8 @@ arena_dalloc_junk_large_t *arena_dalloc_junk_large = #endif static void -arena_dalloc_large_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, - void *ptr, bool junked) +arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, + arena_chunk_t *chunk, void *ptr, bool junked) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, @@ -2988,29 +2996,30 @@ arena_dalloc_large_locked_impl(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, } } - arena_run_dalloc(tsd, arena, run, true, false, false); + arena_run_dalloc(tsdn, arena, run, true, false, false); } void -arena_dalloc_large_junked_locked(tsd_t *tsd, arena_t *arena, +arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr) { - arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, true); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); } void -arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr) +arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, + void *ptr) { - malloc_mutex_lock(tsd, &arena->lock); - arena_dalloc_large_locked_impl(tsd, arena, chunk, ptr, false); - malloc_mutex_unlock(tsd, &arena->lock); - arena_decay_tick(tsd, arena); + malloc_mutex_lock(tsdn, &arena->lock); + arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); + malloc_mutex_unlock(tsdn, &arena->lock); + arena_decay_tick(tsdn, arena); } static void -arena_ralloc_large_shrink(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t size) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; @@ -3024,8 +3033,8 @@ arena_ralloc_large_shrink(tsd_t 
*tsd, arena_t *arena, arena_chunk_t *chunk, * Shrink the run, and make trailing pages available for other * allocations. */ - malloc_mutex_lock(tsd, &arena->lock); - arena_run_trim_tail(tsd, arena, chunk, run, oldsize + large_pad, size + + malloc_mutex_lock(tsdn, &arena->lock); + arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + large_pad, true); if (config_stats) { szind_t oldindex = size2index(oldsize) - NBINS; @@ -3043,11 +3052,11 @@ arena_ralloc_large_shrink(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } static bool -arena_ralloc_large_grow(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, +arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; @@ -3058,7 +3067,7 @@ arena_ralloc_large_grow(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, large_pad); /* Try to extend the run. */ - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, pageind+npages) != 0) goto label_fail; @@ -3138,11 +3147,11 @@ arena_ralloc_large_grow(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, arena->stats.lstats[index].nrequests++; arena->stats.lstats[index].curruns++; } - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } label_fail: - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (true); } @@ -3171,7 +3180,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large = * always fail if growing an object, and the following run is already in use. */ static bool -arena_ralloc_large(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, +arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { arena_chunk_t *chunk; @@ -3186,16 +3195,16 @@ arena_ralloc_large(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, arena = extent_node_arena_get(&chunk->node); if (oldsize < usize_max) { - bool ret = arena_ralloc_large_grow(tsd, arena, chunk, ptr, + bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, oldsize, usize_min, usize_max, zero); if (config_fill && !ret && !zero) { if (unlikely(opt_junk_alloc)) { memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK, - isalloc(tsd, ptr, config_prof) - oldsize); + isalloc(tsdn, ptr, config_prof) - oldsize); } else if (unlikely(opt_zero)) { memset((void *)((uintptr_t)ptr + oldsize), 0, - isalloc(tsd, ptr, config_prof) - oldsize); + isalloc(tsdn, ptr, config_prof) - oldsize); } } return (ret); @@ -3204,12 +3213,12 @@ arena_ralloc_large(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, assert(oldsize > usize_max); /* Fill before shrinking in order avoid a race. 
*/ arena_ralloc_junk_large(ptr, oldsize, usize_max); - arena_ralloc_large_shrink(tsd, arena, chunk, ptr, oldsize, usize_max); + arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); return (false); } bool -arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero) { size_t usize_min, usize_max; @@ -3239,32 +3248,32 @@ arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, } else { if (usize_max <= SMALL_MAXCLASS) return (true); - if (arena_ralloc_large(tsd, ptr, oldsize, usize_min, + if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, usize_max, zero)) return (true); } chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena_decay_tick(tsd, extent_node_arena_get(&chunk->node)); + arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); return (false); } else { - return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min, + return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, usize_max, zero)); } } static void * -arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { if (alignment == 0) - return (arena_malloc(tsd, arena, usize, size2index(usize), zero, - tcache, true)); + return (arena_malloc(tsdn, arena, usize, size2index(usize), + zero, tcache, true)); usize = sa2u(usize, alignment); if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) return (NULL); - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); } void * @@ -3282,7 +3291,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t copysize; /* Try to avoid moving the allocation. */ - if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero)) + if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, + zero)) return (ptr); /* @@ -3290,8 +3300,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, * the object. In that case, fall back to allocating new space * and copying. 
*/ - ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, - zero, tcache); + ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, + alignment, zero, tcache); if (ret == NULL) return (NULL); @@ -3312,25 +3322,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, } dss_prec_t -arena_dss_prec_get(tsd_t *tsd, arena_t *arena) +arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) { dss_prec_t ret; - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ret = arena->dss_prec; - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (ret); } bool -arena_dss_prec_set(tsd_t *tsd, arena_t *arena, dss_prec_t dss_prec) +arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) { if (!have_dss) return (dss_prec != dss_prec_disabled); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena->dss_prec = dss_prec; - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); return (false); } @@ -3387,19 +3397,19 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, } void -arena_basic_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty) { - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, decay_time, nactive, ndirty); - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } void -arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, @@ -3409,7 +3419,7 @@ arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, cassert(config_stats); - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, decay_time, nactive, ndirty); @@ -3440,12 +3450,12 @@ arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; } - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(tsd, &bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); bstats[i].nmalloc += bin->stats.nmalloc; bstats[i].ndalloc += bin->stats.ndalloc; bstats[i].nrequests += bin->stats.nrequests; @@ -3457,7 +3467,7 @@ arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads, bstats[i].nruns += bin->stats.nruns; bstats[i].reruns += bin->stats.reruns; bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(tsd, &bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); } } @@ -3483,7 +3493,7 @@ arena_nthreads_dec(arena_t *arena, bool internal) } arena_t * -arena_new(tsd_t *tsd, unsigned ind) +arena_new(tsdn_t *tsdn, unsigned ind) { arena_t *arena; size_t arena_size; @@ -3497,11 +3507,12 @@ arena_new(tsd_t *tsd, unsigned ind) * because there is no way to clean up if base_alloc() OOMs. 
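With the stats readers above taking tsdn_t *, a monitoring path can snapshot an arena without asserting that tsd is available. A hypothetical sketch (example_print_arena_stats() is illustrative; the merge functions accumulate into their outputs, hence the zero-initialization):

static void
example_print_arena_stats(tsdn_t *tsdn, arena_t *arena)
{
	unsigned nthreads = 0;
	const char *dss;
	ssize_t lg_dirty_mult, decay_time;
	size_t nactive = 0, ndirty = 0;

	arena_basic_stats_merge(tsdn, arena, &nthreads, &dss, &lg_dirty_mult,
	    &decay_time, &nactive, &ndirty);
	malloc_printf("nthreads: %u, nactive: %zu, ndirty: %zu\n", nthreads,
	    nactive, ndirty);
}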
*/ if (config_stats) { - arena = (arena_t *)base_alloc(tsd, CACHELINE_CEILING(arena_size) - + QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + - nhclasses) * sizeof(malloc_huge_stats_t)); + arena = (arena_t *)base_alloc(tsdn, + CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses * + sizeof(malloc_large_stats_t) + nhclasses) * + sizeof(malloc_huge_stats_t)); } else - arena = (arena_t *)base_alloc(tsd, arena_size); + arena = (arena_t *)base_alloc(tsdn, arena_size); if (arena == NULL) return (NULL); @@ -3540,7 +3551,7 @@ arena_new(tsd_t *tsd, unsigned ind) (uint64_t)(uintptr_t)arena; } - arena->dss_prec = chunk_dss_prec_get(tsd); + arena->dss_prec = chunk_dss_prec_get(tsdn); ql_new(&arena->achunks); @@ -3823,58 +3834,58 @@ arena_boot(void) } void -arena_prefork0(tsd_t *tsd, arena_t *arena) +arena_prefork0(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsd, &arena->lock); + malloc_mutex_prefork(tsdn, &arena->lock); } void -arena_prefork1(tsd_t *tsd, arena_t *arena) +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsd, &arena->chunks_mtx); + malloc_mutex_prefork(tsdn, &arena->chunks_mtx); } void -arena_prefork2(tsd_t *tsd, arena_t *arena) +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { - malloc_mutex_prefork(tsd, &arena->node_cache_mtx); + malloc_mutex_prefork(tsdn, &arena->node_cache_mtx); } void -arena_prefork3(tsd_t *tsd, arena_t *arena) +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { unsigned i; for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(tsd, &arena->bins[i].lock); - malloc_mutex_prefork(tsd, &arena->huge_mtx); + malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + malloc_mutex_prefork(tsdn, &arena->huge_mtx); } void -arena_postfork_parent(tsd_t *tsd, arena_t *arena) +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_postfork_parent(tsd, &arena->huge_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(tsd, &arena->bins[i].lock); - malloc_mutex_postfork_parent(tsd, &arena->node_cache_mtx); - malloc_mutex_postfork_parent(tsd, &arena->chunks_mtx); - malloc_mutex_postfork_parent(tsd, &arena->lock); + malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->lock); } void -arena_postfork_child(tsd_t *tsd, arena_t *arena) +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; - malloc_mutex_postfork_child(tsd, &arena->huge_mtx); + malloc_mutex_postfork_child(tsdn, &arena->huge_mtx); for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(tsd, &arena->bins[i].lock); - malloc_mutex_postfork_child(tsd, &arena->node_cache_mtx); - malloc_mutex_postfork_child(tsd, &arena->chunks_mtx); - malloc_mutex_postfork_child(tsd, &arena->lock); + malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx); + malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx); + malloc_mutex_postfork_child(tsdn, &arena->lock); } diff --git a/src/base.c b/src/base.c index 901553a1..81b0801f 100644 --- a/src/base.c +++ b/src/base.c @@ -14,11 +14,11 @@ static size_t base_mapped; /******************************************************************************/ static extent_node_t * -base_node_try_alloc(tsd_t *tsd) +base_node_try_alloc(tsdn_t *tsdn) { extent_node_t *node; - malloc_mutex_assert_owner(tsd, &base_mtx); + malloc_mutex_assert_owner(tsdn, 
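Several call sites below (chunk_alloc_default, chunk_dalloc_default, huge_palloc) rely on the nullable-tsd convention: tsdn_fetch() may return NULL before bootstrap, tsdn_null() tests for that, and tsdn_tsd() converts back only when non-NULL. The following toy miniature shows the shape of that contract; the real tsd_t/tsdn_t definitions differ, and all toy_* names are hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct { int dummy; } toy_tsd_t;
typedef toy_tsd_t toy_tsdn_t;	/* nullable alias: NULL means "no tsd yet" */

static bool toy_tsd_booted = false;
static toy_tsd_t toy_tsd;

static bool
toy_tsdn_null(const toy_tsdn_t *tsdn)
{
	return (tsdn == NULL);
}

static toy_tsd_t *
toy_tsdn_tsd(toy_tsdn_t *tsdn)
{
	/* The only dangerous conversion, guarded by an assertion. */
	assert(!toy_tsdn_null(tsdn));
	return (tsdn);
}

static toy_tsdn_t *
toy_tsdn_fetch(void)
{
	if (!toy_tsd_booted)
		return (NULL);	/* pre-bootstrap callers get NULL */
	return (&toy_tsd);
}

Code that merely locks mutexes or records stats can run with a NULL tsdn; code that truly needs thread state (e.g. arena_choose() in huge_palloc below) converts with the checked accessor.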
diff --git a/src/base.c b/src/base.c
index 901553a1..81b0801f 100644
--- a/src/base.c
+++ b/src/base.c
@@ -14,11 +14,11 @@ static size_t	base_mapped;
 /******************************************************************************/

 static extent_node_t *
-base_node_try_alloc(tsd_t *tsd)
+base_node_try_alloc(tsdn_t *tsdn)
 {
 	extent_node_t *node;

-	malloc_mutex_assert_owner(tsd, &base_mtx);
+	malloc_mutex_assert_owner(tsdn, &base_mtx);

 	if (base_nodes == NULL)
 		return (NULL);
@@ -29,10 +29,10 @@ base_node_try_alloc(tsd_t *tsd)
 }

 static void
-base_node_dalloc(tsd_t *tsd, extent_node_t *node)
+base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
 {

-	malloc_mutex_assert_owner(tsd, &base_mtx);
+	malloc_mutex_assert_owner(tsdn, &base_mtx);

 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	*(extent_node_t **)node = base_nodes;
@@ -40,22 +40,22 @@ base_node_dalloc(tsd_t *tsd, extent_node_t *node)
 }

 static extent_node_t *
-base_chunk_alloc(tsd_t *tsd, size_t minsize)
+base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 {
 	extent_node_t *node;
 	size_t csize, nsize;
 	void *addr;

-	malloc_mutex_assert_owner(tsd, &base_mtx);
+	malloc_mutex_assert_owner(tsdn, &base_mtx);
 	assert(minsize != 0);
-	node = base_node_try_alloc(tsd);
+	node = base_node_try_alloc(tsdn);
 	/* Allocate enough space to also carve a node out if necessary. */
 	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
 	csize = CHUNK_CEILING(minsize + nsize);
 	addr = chunk_alloc_base(csize);
 	if (addr == NULL) {
 		if (node != NULL)
-			base_node_dalloc(tsd, node);
+			base_node_dalloc(tsdn, node);
 		return (NULL);
 	}
 	base_mapped += csize;
@@ -78,7 +78,7 @@ base_chunk_alloc(tsd_t *tsd, size_t minsize)
 * physical memory usage.
 */
void *
-base_alloc(tsd_t *tsd, size_t size)
+base_alloc(tsdn_t *tsdn, size_t size)
 {
 	void *ret;
 	size_t csize, usize;
@@ -93,14 +93,14 @@ base_alloc(tsd_t *tsd, size_t size)
 	usize = s2u(csize);
 	extent_node_init(&key, NULL, NULL, usize, false, false);
-	malloc_mutex_lock(tsd, &base_mtx);
+	malloc_mutex_lock(tsdn, &base_mtx);
 	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
 	if (node != NULL) {
 		/* Use existing space. */
 		extent_tree_szad_remove(&base_avail_szad, node);
 	} else {
 		/* Try to allocate more space. */
-		node = base_chunk_alloc(tsd, csize);
+		node = base_chunk_alloc(tsdn, csize);
 	}
 	if (node == NULL) {
 		ret = NULL;
@@ -113,7 +113,7 @@ base_alloc(tsd_t *tsd, size_t size)
 		extent_node_size_set(node, extent_node_size_get(node) - csize);
 		extent_tree_szad_insert(&base_avail_szad, node);
 	} else
-		base_node_dalloc(tsd, node);
+		base_node_dalloc(tsdn, node);
 	if (config_stats) {
 		base_allocated += csize;
 		/*
@@ -125,21 +125,22 @@ base_alloc(tsd_t *tsd, size_t size)
 	}
 	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
-	malloc_mutex_unlock(tsd, &base_mtx);
+	malloc_mutex_unlock(tsdn, &base_mtx);
 	return (ret);
 }

 void
-base_stats_get(tsd_t *tsd, size_t *allocated, size_t *resident, size_t *mapped)
+base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+    size_t *mapped)
 {

-	malloc_mutex_lock(tsd, &base_mtx);
+	malloc_mutex_lock(tsdn, &base_mtx);
 	assert(base_allocated <= base_resident);
 	assert(base_resident <= base_mapped);
 	*allocated = base_allocated;
 	*resident = base_resident;
 	*mapped = base_mapped;
-	malloc_mutex_unlock(tsd, &base_mtx);
+	malloc_mutex_unlock(tsdn, &base_mtx);
 }

 bool
@@ -155,22 +156,22 @@ base_boot(void)
 }

 void
-base_prefork(tsd_t *tsd)
+base_prefork(tsdn_t *tsdn)
 {

-	malloc_mutex_prefork(tsd, &base_mtx);
+	malloc_mutex_prefork(tsdn, &base_mtx);
 }

 void
-base_postfork_parent(tsd_t *tsd)
+base_postfork_parent(tsdn_t *tsdn)
 {

-	malloc_mutex_postfork_parent(tsd, &base_mtx);
+	malloc_mutex_postfork_parent(tsdn, &base_mtx);
 }

 void
-base_postfork_child(tsd_t *tsd)
+base_postfork_child(tsdn_t *tsdn)
 {

-	malloc_mutex_postfork_child(tsd, &base_mtx);
+	malloc_mutex_postfork_child(tsdn, &base_mtx);
 }
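The prefork/postfork_parent/postfork_child triples that recur in this patch (base, chunk, arenas, ctl) are the standard pthread_atfork(3) protocol: every allocator mutex is acquired before fork() so the child never inherits a lock held by a thread that no longer exists. A minimal self-contained illustration of the wiring follows; jemalloc's actual registration happens elsewhere in the tree, and the toy_* names are hypothetical.

#include <pthread.h>

static pthread_mutex_t toy_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
toy_prefork(void)
{
	/* Acquire every lock, in a fixed order, before fork(). */
	pthread_mutex_lock(&toy_mtx);
}

static void
toy_postfork_parent(void)
{
	pthread_mutex_unlock(&toy_mtx);
}

static void
toy_postfork_child(void)
{
	/* Some ports re-initialize instead; a plain unlock suffices here. */
	pthread_mutex_unlock(&toy_mtx);
}

static void
toy_install_fork_hooks(void)
{
	(void)pthread_atfork(toy_prefork, toy_postfork_parent,
	    toy_postfork_child);
}

The numbered arena_prefork0..3 functions above exist so the caller can interleave other subsystems' locks into one global acquisition order.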
diff --git a/src/chunk.c b/src/chunk.c
index 1f2afd9d..adc666ff 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -49,7 +49,7 @@ const chunk_hooks_t	chunk_hooks_default = {
 * definition.
 */

-static void	chunk_record(tsd_t *tsd, arena_t *arena,
+static void	chunk_record(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
     extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
     bool zeroed, bool committed);
@@ -64,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
 }

 chunk_hooks_t
-chunk_hooks_get(tsd_t *tsd, arena_t *arena)
+chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
 {
 	chunk_hooks_t chunk_hooks;

-	malloc_mutex_lock(tsd, &arena->chunks_mtx);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks = chunk_hooks_get_locked(arena);
-	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

 	return (chunk_hooks);
 }

 chunk_hooks_t
-chunk_hooks_set(tsd_t *tsd, arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
 {
 	chunk_hooks_t old_chunk_hooks;

-	malloc_mutex_lock(tsd, &arena->chunks_mtx);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	old_chunk_hooks = arena->chunk_hooks;
 	/*
 	 * Copy each field atomically so that it is impossible for readers to
@@ -105,13 +105,13 @@ chunk_hooks_set(tsd_t *tsd, arena_t *arena, const chunk_hooks_t *chunk_hooks)
 	ATOMIC_COPY_HOOK(split);
 	ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
-	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

 	return (old_chunk_hooks);
 }

 static void
-chunk_hooks_assure_initialized_impl(tsd_t *tsd, arena_t *arena,
+chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, bool locked)
 {
 	static const chunk_hooks_t uninitialized_hooks =
@@ -120,28 +120,28 @@ chunk_hooks_assure_initialized_impl(tsd_t *tsd, arena_t *arena,
 	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
 	    0) {
 		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
-		    chunk_hooks_get(tsd, arena);
+		    chunk_hooks_get(tsdn, arena);
 	}
 }

 static void
-chunk_hooks_assure_initialized_locked(tsd_t *tsd, arena_t *arena,
+chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks)
 {

-	chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, true);
+	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
 }

 static void
-chunk_hooks_assure_initialized(tsd_t *tsd, arena_t *arena,
+chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks)
 {

-	chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, false);
+	chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
 }

 bool
-chunk_register(tsd_t *tsd, const void *chunk, const extent_node_t *node)
+chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
 {

 	assert(extent_node_addr_get(node) == chunk);
@@ -161,7 +161,7 @@ chunk_register(tsd_t *tsd, const void *chunk, const extent_node_t *node)
 			high = atomic_read_z(&highchunks);
 		}
 		if (cur > high && prof_gdump_get_unlocked())
-			prof_gdump(tsd);
+			prof_gdump(tsdn);
 	}

 	return (false);
@@ -199,7 +199,7 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
 }

 static void *
-chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
     bool dalloc_node)
@@ -221,8 +221,8 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
-	malloc_mutex_lock(tsd, &arena->chunks_mtx);
-	chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 	if (new_addr != NULL) {
 		extent_node_t key;
 		extent_node_init(&key, arena, new_addr, alloc_size, false,
@@ -234,7 +234,7 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	}
 	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
 	    size)) {
-		malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 		return (NULL);
 	}
 	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -253,7 +253,7 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (leadsize != 0 &&
 	    chunk_hooks->split(extent_node_addr_get(node),
 	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
-		malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 		return (NULL);
 	}
 	/* Remove node from the tree. */
@@ -273,19 +273,19 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		if (chunk_hooks->split(ret, size + trailsize, size,
 		    trailsize, false, arena->ind)) {
 			if (dalloc_node && node != NULL)
-				arena_node_dalloc(tsd, arena, node);
-			malloc_mutex_unlock(tsd, &arena->chunks_mtx);
-			chunk_record(tsd, arena, chunk_hooks, chunks_szad,
+				arena_node_dalloc(tsdn, arena, node);
+			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+			chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
 			    chunks_ad, cache, ret, size + trailsize, zeroed,
 			    committed);
 			return (NULL);
 		}
 		/* Insert the trailing space as a smaller chunk. */
 		if (node == NULL) {
-			node = arena_node_alloc(tsd, arena);
+			node = arena_node_alloc(tsdn, arena);
 			if (node == NULL) {
-				malloc_mutex_unlock(tsd, &arena->chunks_mtx);
-				chunk_record(tsd, arena, chunk_hooks,
+				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+				chunk_record(tsdn, arena, chunk_hooks,
 				    chunks_szad, chunks_ad, cache, ret, size
 				    + trailsize, zeroed, committed);
 				return (NULL);
@@ -299,16 +299,16 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		node = NULL;
 	}
 	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
-		malloc_mutex_unlock(tsd, &arena->chunks_mtx);
-		chunk_record(tsd, arena, chunk_hooks, chunks_szad, chunks_ad,
+		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+		chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
 		    cache, ret, size, zeroed, committed);
 		return (NULL);
 	}
-	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

 	assert(dalloc_node || node != NULL);
 	if (dalloc_node && node != NULL)
-		arena_node_dalloc(tsd, arena, node);
+		arena_node_dalloc(tsdn, arena, node);
 	if (*zero) {
 		if (!zeroed)
 			memset(ret, 0, size);
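The "Beware size_t wrap-around" guard above is worth calling out: a padded size computed for alignment can overflow size_t, and the wrapped (small) value would otherwise sail through later checks. A self-contained sketch of the idiom follows; the exact padding formula is an assumption for illustration, not jemalloc's computation.

#include <stddef.h>

/*
 * Returns the padded allocation size, or 0 on overflow.  Assumes
 * alignment >= chunksize, as in the chunk-recycling path above.
 */
static size_t
toy_alloc_size(size_t size, size_t alignment, size_t chunksize)
{
	size_t alloc_size = size + alignment - chunksize;

	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (0);	/* overflow: caller must fail the request */
	return (alloc_size);
}

Because unsigned overflow wraps modulo 2^N, the single comparison alloc_size < size is a complete overflow test for this addition.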
@@ -331,7 +331,7 @@ chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 * them if they are returned.
 */
static void *
-chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
+chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
 {
 	void *ret;
@@ -343,7 +343,7 @@ chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
 	/* "primary" dss. */
 	if (have_dss && dss_prec == dss_prec_primary && (ret =
-	    chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
+	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
 	    commit)) != NULL)
 		return (ret);
 	/* mmap. */
@@ -352,7 +352,7 @@ chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
 		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
-	    chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
+	    chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
 	    commit)) != NULL)
 		return (ret);

@@ -383,7 +383,7 @@ chunk_alloc_base(size_t size)
 }

 void *
-chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
 {
 	void *ret;
@@ -395,9 +395,9 @@ chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert((alignment & chunksize_mask) == 0);

 	commit = true;
-	ret = chunk_recycle(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
-	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
-	    &commit, dalloc_node);
+	ret = chunk_recycle(tsdn, arena, chunk_hooks,
+	    &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
+	    new_addr, size, alignment, zero, &commit, dalloc_node);
 	if (ret == NULL)
 		return (NULL);
 	assert(commit);
@@ -407,11 +407,11 @@ chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 }

 static arena_t *
-chunk_arena_get(tsd_t *tsd, unsigned arena_ind)
+chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
 {
 	arena_t *arena;

-	arena = arena_get(tsd, arena_ind, false);
+	arena = arena_get(tsdn, arena_ind, false);
 	/*
 	 * The arena we're allocating on behalf of must have been initialized
 	 * already.
@@ -425,12 +425,12 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
     bool *commit, unsigned arena_ind)
 {
 	void *ret;
-	tsd_t *tsd;
+	tsdn_t *tsdn;
 	arena_t *arena;

-	tsd = tsd_fetch();
-	arena = chunk_arena_get(tsd, arena_ind);
-	ret = chunk_alloc_core(tsd, arena, new_addr, size, alignment, zero,
+	tsdn = tsdn_fetch();
+	arena = chunk_arena_get(tsdn, arena_ind);
+	ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
 	    commit, arena->dss_prec);
 	if (ret == NULL)
 		return (NULL);
@@ -441,7 +441,7 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 }

 static void *
-chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
 {
 	void *ret;
@@ -451,7 +451,7 @@ chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);

-	ret = chunk_recycle(tsd, arena, chunk_hooks,
+	ret = chunk_recycle(tsdn, arena, chunk_hooks,
 	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
 	    new_addr, size, alignment, zero, commit, true);

@@ -462,14 +462,14 @@ chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,

 void *
-chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
 {
 	void *ret;

-	chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

-	ret = chunk_alloc_retained(tsd, arena, chunk_hooks, new_addr, size,
+	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
 	    alignment, zero, commit);
 	if (ret == NULL) {
 		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
@@ -484,7 +484,7 @@ chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,

 static void
-chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
     void *chunk, size_t size, bool zeroed, bool committed)
 {
@@ -496,8 +496,8 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	unzeroed = cache || !zeroed;
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

-	malloc_mutex_lock(tsd, &arena->chunks_mtx);
-	chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
+	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
 	    false, false);
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
@@ -522,7 +522,7 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
-		node = arena_node_alloc(tsd, arena);
+		node = arena_node_alloc(tsdn, arena);
 		if (node == NULL) {
 			/*
 			 * Node allocation failed, which is an exceedingly
@@ -531,7 +531,7 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			 * a virtual memory leak.
 			 */
 			if (cache) {
-				chunk_purge_wrapper(tsd, arena, chunk_hooks,
+				chunk_purge_wrapper(tsdn, arena, chunk_hooks,
 				    chunk, size, 0, size);
 			}
 			goto label_return;
@@ -568,15 +568,15 @@ chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		extent_tree_szad_insert(chunks_szad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);

-		arena_node_dalloc(tsd, arena, prev);
+		arena_node_dalloc(tsdn, arena, prev);
 	}

label_return:
-	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 }

 void
-chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool committed)
 {

@@ -585,9 +585,9 @@ chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);

-	chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
 	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
-	arena_maybe_purge(tsd, arena);
+	arena_maybe_purge(tsdn, arena);
 }

 static bool
@@ -595,13 +595,13 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
     unsigned arena_ind)
 {

-	if (!have_dss || !chunk_in_dss(tsd_fetch(), chunk))
+	if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
 		return (chunk_dalloc_mmap(chunk, size));
 	return (true);
 }

 void
-chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, bool zeroed, bool committed)
 {

@@ -610,7 +610,7 @@ chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);

-	chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 	/* Try to deallocate. */
 	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
 		return;
@@ -621,7 +621,7 @@ chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	}
 	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
 	    arena->ind);
-	chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_retained,
+	chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
 	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);

 	if (config_stats)
@@ -662,11 +662,11 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
 }

 bool
-chunk_purge_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     void *chunk, size_t size, size_t offset, size_t length)
 {

-	chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
 }

@@ -688,8 +688,8 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
 	if (!maps_coalesce)
 		return (true);
 	if (have_dss) {
-		tsd_t *tsd = tsd_fetch();
-		if (chunk_in_dss(tsd, chunk_a) != chunk_in_dss(tsd, chunk_b))
+		tsdn_t *tsdn = tsdn_fetch();
+		if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
 			return (true);
 	}

@@ -700,7 +700,7 @@
 static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {

-	return ((rtree_node_elm_t *)base_alloc(tsd_fetch(), nelms *
+	return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
 	    sizeof(rtree_node_elm_t)));
 }

@@ -747,22 +747,22 @@ chunk_boot(void)
 }

 void
-chunk_prefork(tsd_t *tsd)
+chunk_prefork(tsdn_t *tsdn)
 {

-	chunk_dss_prefork(tsd);
+	chunk_dss_prefork(tsdn);
 }

 void
-chunk_postfork_parent(tsd_t *tsd)
+chunk_postfork_parent(tsdn_t *tsdn)
 {

-	chunk_dss_postfork_parent(tsd);
+	chunk_dss_postfork_parent(tsdn);
 }

 void
-chunk_postfork_child(tsd_t *tsd)
+chunk_postfork_child(tsdn_t *tsdn)
 {

-	chunk_dss_postfork_child(tsd);
+	chunk_dss_postfork_child(tsdn);
 }
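Note why chunk_dalloc_default and chunk_merge_default re-fetch state internally: the chunk-hook signatures carry no tsd/tsdn parameter, so the default hooks call tsdn_fetch() themselves. A user-supplied hook faces the same constraint. The sketch below shows a hook with the same shape as chunk_dalloc_default above; it is an illustration under the jemalloc 4.x hook convention (returning true means "refused/failed"), not a recommended implementation.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

static bool
my_chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
{
	(void)committed;
	(void)arena_ind;
	/* Return false on success; true tells jemalloc to retain the chunk. */
	return (munmap(chunk, size) != 0);
}

A hook like this would typically be installed through chunk_hooks_set()'s public counterpart (the arena.<i>.chunk_hooks mallctl), which is exactly the path arena_i_chunk_hooks_ctl exercises later in this patch.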
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index 3b3f2433..0b1f82bd 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -41,32 +41,32 @@ chunk_dss_sbrk(intptr_t increment)
 }

 dss_prec_t
-chunk_dss_prec_get(tsd_t *tsd)
+chunk_dss_prec_get(tsdn_t *tsdn)
 {
 	dss_prec_t ret;

 	if (!have_dss)
 		return (dss_prec_disabled);
-	malloc_mutex_lock(tsd, &dss_mtx);
+	malloc_mutex_lock(tsdn, &dss_mtx);
 	ret = dss_prec_default;
-	malloc_mutex_unlock(tsd, &dss_mtx);
+	malloc_mutex_unlock(tsdn, &dss_mtx);
 	return (ret);
 }

 bool
-chunk_dss_prec_set(tsd_t *tsd, dss_prec_t dss_prec)
+chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
 {

 	if (!have_dss)
 		return (dss_prec != dss_prec_disabled);
-	malloc_mutex_lock(tsd, &dss_mtx);
+	malloc_mutex_lock(tsdn, &dss_mtx);
 	dss_prec_default = dss_prec;
-	malloc_mutex_unlock(tsd, &dss_mtx);
+	malloc_mutex_unlock(tsdn, &dss_mtx);
 	return (false);
 }

 void *
-chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
+chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     size_t alignment, bool *zero, bool *commit)
 {
 	cassert(have_dss);
@@ -80,7 +80,7 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
 	if ((intptr_t)size < 0)
 		return (NULL);

-	malloc_mutex_lock(tsd, &dss_mtx);
+	malloc_mutex_lock(tsdn, &dss_mtx);
 	if (dss_prev != (void *)-1) {

 		/*
@@ -122,7 +122,7 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
 			if ((uintptr_t)ret < (uintptr_t)dss_max ||
 			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
 				/* Wrap-around. */
-				malloc_mutex_unlock(tsd, &dss_mtx);
+				malloc_mutex_unlock(tsdn, &dss_mtx);
 				return (NULL);
 			}
 			incr = gap_size + cpad_size + size;
@@ -130,11 +130,11 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
 			if (dss_prev == dss_max) {
 				/* Success. */
 				dss_max = dss_next;
-				malloc_mutex_unlock(tsd, &dss_mtx);
+				malloc_mutex_unlock(tsdn, &dss_mtx);
 				if (cpad_size != 0) {
 					chunk_hooks_t chunk_hooks =
 					    CHUNK_HOOKS_INITIALIZER;
-					chunk_dalloc_wrapper(tsd, arena,
+					chunk_dalloc_wrapper(tsdn, arena,
 					    &chunk_hooks, cpad, cpad_size,
 					    false, true);
 				}
@@ -149,25 +149,25 @@ chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
 			}
 		} while (dss_prev != (void *)-1);
 	}
-	malloc_mutex_unlock(tsd, &dss_mtx);
+	malloc_mutex_unlock(tsdn, &dss_mtx);

 	return (NULL);
 }

 bool
-chunk_in_dss(tsd_t *tsd, void *chunk)
+chunk_in_dss(tsdn_t *tsdn, void *chunk)
 {
 	bool ret;

 	cassert(have_dss);

-	malloc_mutex_lock(tsd, &dss_mtx);
+	malloc_mutex_lock(tsdn, &dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max)
 		ret = true;
 	else
 		ret = false;
-	malloc_mutex_unlock(tsd, &dss_mtx);
+	malloc_mutex_unlock(tsdn, &dss_mtx);

 	return (ret);
 }

@@ -188,27 +188,27 @@ chunk_dss_boot(void)
 }

 void
-chunk_dss_prefork(tsd_t *tsd)
+chunk_dss_prefork(tsdn_t *tsdn)
 {

 	if (have_dss)
-		malloc_mutex_prefork(tsd, &dss_mtx);
+		malloc_mutex_prefork(tsdn, &dss_mtx);
 }

 void
-chunk_dss_postfork_parent(tsd_t *tsd)
+chunk_dss_postfork_parent(tsdn_t *tsdn)
 {

 	if (have_dss)
-		malloc_mutex_postfork_parent(tsd, &dss_mtx);
+		malloc_mutex_postfork_parent(tsdn, &dss_mtx);
 }

 void
-chunk_dss_postfork_child(tsd_t *tsd)
+chunk_dss_postfork_child(tsdn_t *tsdn)
 {

 	if (have_dss)
-		malloc_mutex_postfork_child(tsd, &dss_mtx);
+		malloc_mutex_postfork_child(tsdn, &dss_mtx);
 }

/******************************************************************************/
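The chunk_in_dss() logic above reduces to a half-open range test taken under the same mutex that serializes sbrk extensions, so dss_max cannot move mid-check. A self-contained miniature, with hypothetical toy_* names:

#include <stdbool.h>
#include <stdint.h>
#include <pthread.h>

static pthread_mutex_t toy_dss_mtx = PTHREAD_MUTEX_INITIALIZER;
static void *toy_dss_base;	/* set once at boot from sbrk(0) */
static void *toy_dss_max;	/* advanced on each successful extension */

static bool
toy_in_dss(void *ptr)
{
	bool ret;

	pthread_mutex_lock(&toy_dss_mtx);
	/* Half-open interval [dss_base, dss_max). */
	ret = ((uintptr_t)ptr >= (uintptr_t)toy_dss_base &&
	    (uintptr_t)ptr < (uintptr_t)toy_dss_max);
	pthread_mutex_unlock(&toy_dss_mtx);
	return (ret);
}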
diff --git a/src/ckh.c b/src/ckh.c
index 25185974..747c1c86 100644
--- a/src/ckh.c
+++ b/src/ckh.c
@@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

-static bool	ckh_grow(tsd_t *tsd, ckh_t *ckh);
-static void	ckh_shrink(tsd_t *tsd, ckh_t *ckh);
+static bool	ckh_grow(tsdn_t *tsdn, ckh_t *ckh);
+static void	ckh_shrink(tsdn_t *tsdn, ckh_t *ckh);

/******************************************************************************/

@@ -244,7 +244,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
 }

 static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
+ckh_grow(tsdn_t *tsdn, ckh_t *ckh)
 {
 	bool ret;
 	ckhc_t *tab, *ttab;
@@ -270,8 +270,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 			ret = true;
 			goto label_return;
 		}
-		tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
-		    true, arena_ichoose(tsd, NULL));
+		tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
+		    true, arena_ichoose(tsdn, NULL));
 		if (tab == NULL) {
 			ret = true;
 			goto label_return;
@@ -283,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

 		if (!ckh_rebuild(ckh, tab)) {
-			idalloctm(tsd, tab, NULL, true, true);
+			idalloctm(tsdn, tab, NULL, true, true);
 			break;
 		}

 		/* Rebuilding failed, so back out partially rebuilt table. */
-		idalloctm(tsd, ckh->tab, NULL, true, true);
+		idalloctm(tsdn, ckh->tab, NULL, true, true);
 		ckh->tab = tab;
 		ckh->lg_curbuckets = lg_prevbuckets;
 	}
@@ -299,7 +299,7 @@ label_return:
 }

 static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
+ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
 {
 	ckhc_t *tab, *ttab;
 	size_t usize;
@@ -314,8 +314,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
 	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return;
-	tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-	    arena_ichoose(tsd, NULL));
+	tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL, true,
+	    arena_ichoose(tsdn, NULL));
 	if (tab == NULL) {
 		/*
 		 * An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

 	if (!ckh_rebuild(ckh, tab)) {
-		idalloctm(tsd, tab, NULL, true, true);
+		idalloctm(tsdn, tab, NULL, true, true);
#ifdef CKH_COUNT
 		ckh->nshrinks++;
#endif
@@ -338,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 	}

 	/* Rebuilding failed, so back out partially rebuilt table. */
-	idalloctm(tsd, ckh->tab, NULL, true, true);
+	idalloctm(tsdn, ckh->tab, NULL, true, true);
 	ckh->tab = tab;
 	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -347,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 }

 bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
     ckh_keycomp_t *keycomp)
 {
 	bool ret;
@@ -391,8 +391,8 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 		ret = true;
 		goto label_return;
 	}
-	ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
-	    arena_ichoose(tsd, NULL));
+	ckh->tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
+	    true, arena_ichoose(tsdn, NULL));
 	if (ckh->tab == NULL) {
 		ret = true;
 		goto label_return;
@@ -404,7 +404,7 @@ label_return:
 }

 void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
+ckh_delete(tsdn_t *tsdn, ckh_t *ckh)
 {

 	assert(ckh != NULL);
@@ -421,7 +421,7 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
 	    (unsigned long long)ckh->nrelocs);
#endif

-	idalloctm(tsd, ckh->tab, NULL, true, true);
+	idalloctm(tsdn, ckh->tab, NULL, true, true);
 	if (config_debug)
 		memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
 }

@@ -456,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
 }

 bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
+ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data)
 {
 	bool ret;

@@ -468,7 +468,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
#endif

 	while (ckh_try_insert(ckh, &key, &data)) {
-		if (ckh_grow(tsd, ckh)) {
+		if (ckh_grow(tsdn, ckh)) {
 			ret = true;
 			goto label_return;
 		}
@@ -480,7 +480,7 @@ label_return:
 }

 bool
-ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
     void **data)
 {
 	size_t cell;
@@ -502,7 +502,7 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
 	    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
 	    > ckh->lg_minbuckets) {
 		/* Ignore error due to OOM. */
-		ckh_shrink(tsd, ckh);
+		ckh_shrink(tsdn, ckh);
 	}

 	return (false);
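The ckh_grow() loop above follows a "grow, attempt rebuild, back out on failure, retry larger" discipline, since a cuckoo rehash can fail even into a bigger table. Below is a loose, self-contained caricature of that loop only; it elides the table-swap dance and all hashing, and every toy_* name is hypothetical.

#include <stdbool.h>
#include <stdlib.h>

typedef struct { void **slots; size_t nslots; } toy_tab_t;

/* Toy stand-in: pretend rehashing succeeds whenever there is more room. */
static bool
toy_rebuild(toy_tab_t *dst, const toy_tab_t *src)
{
	return (dst->nslots <= src->nslots);	/* true == failure */
}

static bool
toy_grow(toy_tab_t *tab)
{
	size_t nslots = (tab->nslots == 0) ? 1 : tab->nslots;

	for (;;) {
		toy_tab_t new_tab;

		nslots <<= 1;
		if (nslots == 0)
			return (true);	/* size overflow: give up */
		new_tab.nslots = nslots;
		new_tab.slots = calloc(nslots, sizeof(void *));
		if (new_tab.slots == NULL)
			return (true);	/* OOM */
		if (!toy_rebuild(&new_tab, tab)) {
			free(tab->slots);
			*tab = new_tab;	/* commit the larger table */
			return (false);
		}
		/* Rebuilding failed; back out and try a larger size. */
		free(new_tab.slots);
	}
}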
diff --git a/src/ctl.c b/src/ctl.c
index fd5561a3..dad80086 100644
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -46,20 +46,20 @@
static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#define	INDEX_PROTO(n)							\
-static const ctl_named_node_t	*n##_index(tsd_t *tsd,			\
+static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
    const size_t *mib, size_t miblen, size_t i);

static bool	ctl_arena_init(ctl_arena_stats_t *astats);
static void	ctl_arena_clear(ctl_arena_stats_t *astats);
-static void	ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats,
+static void	ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
    arena_t *arena);
static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
-static void	ctl_arena_refresh(tsd_t *tsd, arena_t *arena, unsigned i);
-static bool	ctl_grow(tsd_t *tsd);
-static void	ctl_refresh(tsd_t *tsd);
-static bool	ctl_init(tsd_t *tsd);
-static int	ctl_lookup(tsd_t *tsd, const char *name,
+static void	ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
+static bool	ctl_grow(tsdn_t *tsdn);
+static void	ctl_refresh(tsdn_t *tsdn);
+static bool	ctl_init(tsdn_t *tsdn);
+static int	ctl_lookup(tsdn_t *tsdn, const char *name,
    ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);

CTL_PROTO(version)
@@ -117,7 +117,7 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
-static void	arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all);
+static void	arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_reset)
@@ -560,12 +560,12 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
 }

 static void
-ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats, arena_t *arena)
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
 {
 	unsigned i;

 	if (config_stats) {
-		arena_stats_merge(tsd, arena, &cstats->nthreads, &cstats->dss,
+		arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
 		    &cstats->lg_dirty_mult, &cstats->decay_time,
 		    &cstats->pactive, &cstats->pdirty, &cstats->astats,
 		    cstats->bstats, cstats->lstats, cstats->hstats);
@@ -578,7 +578,7 @@ ctl_arena_stats_amerge(tsd_t *tsd, ctl_arena_stats_t *cstats, arena_t *arena)
 			cstats->nrequests_small += cstats->bstats[i].nrequests;
 		}
 	} else {
-		arena_basic_stats_merge(tsd, arena, &cstats->nthreads,
+		arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
 		    &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
 		    &cstats->pactive, &cstats->pdirty);
 	}
@@ -656,24 +656,24 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 }

 static void
-ctl_arena_refresh(tsd_t *tsd, arena_t *arena, unsigned i)
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
 {
 	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
 	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

 	ctl_arena_clear(astats);
-	ctl_arena_stats_amerge(tsd, astats, arena);
+	ctl_arena_stats_amerge(tsdn, astats, arena);
 	/* Merge into sum stats as well. */
 	ctl_arena_stats_smerge(sstats, astats);
 }

 static bool
-ctl_grow(tsd_t *tsd)
+ctl_grow(tsdn_t *tsdn)
 {
 	ctl_arena_stats_t *astats;

 	/* Initialize new arena. */
-	if (arena_init(tsd, ctl_stats.narenas) == NULL)
+	if (arena_init(tsdn, ctl_stats.narenas) == NULL)
 		return (true);

 	/* Allocate extended arena stats. */
@@ -708,7 +708,7 @@ ctl_grow(tsd_t *tsd)
 }

 static void
-ctl_refresh(tsd_t *tsd)
+ctl_refresh(tsdn_t *tsdn)
 {
 	unsigned i;
 	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
@@ -720,19 +720,19 @@ ctl_refresh(tsd_t *tsd)
 	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

 	for (i = 0; i < ctl_stats.narenas; i++)
-		tarenas[i] = arena_get(tsd, i, false);
+		tarenas[i] = arena_get(tsdn, i, false);

 	for (i = 0; i < ctl_stats.narenas; i++) {
 		bool initialized = (tarenas[i] != NULL);

 		ctl_stats.arenas[i].initialized = initialized;
 		if (initialized)
-			ctl_arena_refresh(tsd, tarenas[i], i);
+			ctl_arena_refresh(tsdn, tarenas[i], i);
 	}

 	if (config_stats) {
 		size_t base_allocated, base_resident, base_mapped;
-		base_stats_get(tsd, &base_allocated, &base_resident,
+		base_stats_get(tsdn, &base_allocated, &base_resident,
 		    &base_mapped);
 		ctl_stats.allocated =
 		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
@@ -758,11 +758,11 @@ ctl_refresh(tsd_t *tsd)
 }

 static bool
-ctl_init(tsd_t *tsd)
+ctl_init(tsdn_t *tsdn)
 {
 	bool ret;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsdn, &ctl_mtx);
 	if (!ctl_initialized) {
 		/*
 		 * Allocate space for one extra arena stats element, which
@@ -804,18 +804,18 @@ ctl_init(tsd_t *tsd)
 		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

 		ctl_epoch = 0;
-		ctl_refresh(tsd);
+		ctl_refresh(tsdn);
 		ctl_initialized = true;
 	}

 	ret = false;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsdn, &ctl_mtx);
 	return (ret);
 }

 static int
-ctl_lookup(tsd_t *tsd, const char *name, ctl_node_t const **nodesp,
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
     size_t *mibp, size_t *depthp)
 {
 	int ret;
@@ -868,7 +868,7 @@ ctl_lookup(tsd_t *tsd, const char *name, ctl_node_t const **nodesp,
 			}
 			inode = ctl_indexed_node(node->children);
-			node = inode->index(tsd, mibp, *depthp, (size_t)index);
+			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
 			if (node == NULL) {
 				ret = ENOENT;
 				goto label_return;
@@ -921,13 +921,13 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
 	size_t mib[CTL_MAX_DEPTH];
 	const ctl_named_node_t *node;

-	if (!ctl_initialized && ctl_init(tsd)) {
+	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
 		ret = EAGAIN;
 		goto label_return;
 	}

 	depth = CTL_MAX_DEPTH;
-	ret = ctl_lookup(tsd, name, nodes, mib, &depth);
+	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
 	if (ret != 0)
 		goto label_return;

@@ -944,16 +944,16 @@ label_return:
 }

 int
-ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp)
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
 {
 	int ret;

-	if (!ctl_initialized && ctl_init(tsd)) {
+	if (!ctl_initialized && ctl_init(tsdn)) {
 		ret = EAGAIN;
 		goto label_return;
 	}

-	ret = ctl_lookup(tsd, name, NULL, mibp, miblenp);
+	ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
label_return:
 	return(ret);
 }

@@ -966,7 +966,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	const ctl_named_node_t *node;
 	size_t i;

-	if (!ctl_initialized && ctl_init(tsd)) {
+	if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
 		ret = EAGAIN;
 		goto label_return;
 	}
@@ -988,7 +988,7 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 			/* Indexed element. */
 			inode = ctl_indexed_node(node->children);
-			node = inode->index(tsd, mib, miblen, mib[i]);
+			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
 			if (node == NULL) {
 				ret = ENOENT;
 				goto label_return;
@@ -1021,24 +1021,24 @@ ctl_boot(void)
 }

 void
-ctl_prefork(tsd_t *tsd)
+ctl_prefork(tsdn_t *tsdn)
 {

-	malloc_mutex_prefork(tsd, &ctl_mtx);
+	malloc_mutex_prefork(tsdn, &ctl_mtx);
 }

 void
-ctl_postfork_parent(tsd_t *tsd)
+ctl_postfork_parent(tsdn_t *tsdn)
 {

-	malloc_mutex_postfork_parent(tsd, &ctl_mtx);
+	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
 }

 void
-ctl_postfork_child(tsd_t *tsd)
+ctl_postfork_child(tsdn_t *tsdn)
 {

-	malloc_mutex_postfork_child(tsd, &ctl_mtx);
+	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
 }

/******************************************************************************/

@@ -1104,7 +1104,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
	if (!(c))							\
		return (ENOENT);					\
	if (l)								\
-		malloc_mutex_lock(tsd, &ctl_mtx);			\
+		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l)								\
-		malloc_mutex_unlock(tsd, &ctl_mtx);			\
+		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
	return (ret);							\
}

@@ -1126,14 +1126,14 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
									\
	if (!(c))							\
		return (ENOENT);					\
-	malloc_mutex_lock(tsd, &ctl_mtx);				\
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
-	malloc_mutex_unlock(tsd, &ctl_mtx);				\
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return (ret);							\
}

@@ -1145,14 +1145,14 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
	int ret;							\
	t oldval;							\
									\
-	malloc_mutex_lock(tsd, &ctl_mtx);				\
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
	READONLY();							\
	oldval = (v);							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
-	malloc_mutex_unlock(tsd, &ctl_mtx);				\
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
	return (ret);							\
}

@@ -1243,15 +1243,15 @@ epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	int ret;
 	UNUSED uint64_t newval;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	WRITE(newval, uint64_t);
 	if (newp != NULL)
-		ctl_refresh(tsd);
+		ctl_refresh(tsd_tsdn(tsd));
 	READ(ctl_epoch, uint64_t);

 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1317,7 +1317,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	if (oldarena == NULL)
 		return (EAGAIN);

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	newind = oldind = oldarena->ind;
 	WRITE(newind, unsigned);
 	READ(oldind, unsigned);
@@ -1331,7 +1331,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 		}

 		/* Initialize arena if necessary. */
-		newarena = arena_get(tsd, newind, true);
+		newarena = arena_get(tsd_tsdn(tsd), newind, true);
 		if (newarena == NULL) {
 			ret = EAGAIN;
 			goto label_return;
@@ -1341,15 +1341,15 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 		if (config_tcache) {
 			tcache_t *tcache = tsd_tcache_get(tsd);
 			if (tcache != NULL) {
-				tcache_arena_reassociate(tsd, tcache, oldarena,
-				    newarena);
+				tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+				    oldarena, newarena);
 			}
 		}
 	}

 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1476,9 +1476,9 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	if (!config_tcache)
 		return (ENOENT);

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	READONLY();
-	if (tcaches_create(tsd, &tcache_ind)) {
+	if (tcaches_create(tsd_tsdn(tsd), &tcache_ind)) {
 		ret = EFAULT;
 		goto label_return;
 	}
@@ -1486,7 +1486,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,

 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1541,10 +1541,10 @@ label_return:
/******************************************************************************/

 static void
-arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all)
+arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
 {

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsdn, &ctl_mtx);
 	{
 		unsigned narenas = ctl_stats.narenas;

@@ -1553,30 +1553,30 @@ arena_i_purge(tsd_t *tsd, unsigned arena_ind, bool all)
 			VARIABLE_ARRAY(arena_t *, tarenas, narenas);

 			for (i = 0; i < narenas; i++)
-				tarenas[i] = arena_get(tsd, i, false);
+				tarenas[i] = arena_get(tsdn, i, false);

 			/*
 			 * No further need to hold ctl_mtx, since narenas and
 			 * tarenas contain everything needed below.
 			 */
-			malloc_mutex_unlock(tsd, &ctl_mtx);
+			malloc_mutex_unlock(tsdn, &ctl_mtx);

 			for (i = 0; i < narenas; i++) {
 				if (tarenas[i] != NULL)
-					arena_purge(tsd, tarenas[i], all);
+					arena_purge(tsdn, tarenas[i], all);
 			}
 		} else {
 			arena_t *tarena;

 			assert(arena_ind < narenas);

-			tarena = arena_get(tsd, arena_ind, false);
+			tarena = arena_get(tsdn, arena_ind, false);

 			/* No further need to hold ctl_mtx. */
-			malloc_mutex_unlock(tsd, &ctl_mtx);
+			malloc_mutex_unlock(tsdn, &ctl_mtx);

 			if (tarena != NULL)
-				arena_purge(tsd, tarena, all);
+				arena_purge(tsdn, tarena, all);
 		}
 	}
 }

@@ -1589,7 +1589,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,

 	READONLY();
 	WRITEONLY();
-	arena_i_purge(tsd, (unsigned)mib[1], true);
+	arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);

 	ret = 0;
label_return:
@@ -1604,7 +1604,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,

 	READONLY();
 	WRITEONLY();
-	arena_i_purge(tsd, (unsigned)mib[1], false);
+	arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);

 	ret = 0;
label_return:
@@ -1630,13 +1630,13 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	arena_ind = (unsigned)mib[1];
 	if (config_debug) {
-		malloc_mutex_lock(tsd, &ctl_mtx);
+		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 		assert(arena_ind < ctl_stats.narenas);
-		malloc_mutex_unlock(tsd, &ctl_mtx);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	}
 	assert(arena_ind >= opt_narenas);

-	arena = arena_get(tsd, arena_ind, false);
+	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);

 	arena_reset(tsd, arena);

@@ -1655,7 +1655,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	dss_prec_t dss_prec_old = dss_prec_limit;
 	dss_prec_t dss_prec = dss_prec_limit;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	WRITE(dss, const char *);
 	if (dss != NULL) {
 		int i;
@@ -1676,20 +1676,20 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	}

 	if (arena_ind < ctl_stats.narenas) {
-		arena_t *arena = arena_get(tsd, arena_ind, false);
+		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
 		if (arena == NULL || (dss_prec != dss_prec_limit &&
-		    arena_dss_prec_set(tsd, arena, dss_prec))) {
+		    arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
 			ret = EFAULT;
 			goto label_return;
 		}
-		dss_prec_old = arena_dss_prec_get(tsd, arena);
+		dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
 	} else {
 		if (dss_prec != dss_prec_limit &&
-		    chunk_dss_prec_set(tsd, dss_prec)) {
+		    chunk_dss_prec_set(tsd_tsdn(tsd), dss_prec)) {
 			ret = EFAULT;
 			goto label_return;
 		}
-		dss_prec_old = chunk_dss_prec_get(tsd);
+		dss_prec_old = chunk_dss_prec_get(tsd_tsdn(tsd));
 	}

 	dss = dss_prec_names[dss_prec_old];
@@ -1697,7 +1697,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,

 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1709,14 +1709,14 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;

-	arena = arena_get(tsd, arena_ind, false);
+	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
 	if (arena == NULL) {
 		ret = EFAULT;
 		goto label_return;
 	}

 	if (oldp != NULL && oldlenp != NULL) {
-		size_t oldval = arena_lg_dirty_mult_get(tsd, arena);
+		size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
 		READ(oldval, ssize_t);
 	}
 	if (newp != NULL) {
@@ -1724,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 			ret = EINVAL;
 			goto label_return;
 		}
-		if (arena_lg_dirty_mult_set(tsd, arena, *(ssize_t *)newp)) {
+		if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
+		    *(ssize_t *)newp)) {
 			ret = EFAULT;
 			goto label_return;
 		}
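From the caller's side, the oldp/newp protocol these handlers implement is the public mallctl(3) interface: a single call can read the previous value and install a new one. A small grounded example against the arena.<i>.lg_dirty_mult node touched above:

#include <stddef.h>
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static void
example_lg_dirty_mult(void)
{
	ssize_t old_mult, new_mult = 4;
	size_t sz = sizeof(old_mult);

	/* Read the current value and write a new one in one call. */
	if (mallctl("arena.0.lg_dirty_mult", &old_mult, &sz, &new_mult,
	    sizeof(new_mult)) != 0)
		fprintf(stderr, "mallctl failed\n");
	else
		printf("lg_dirty_mult was %zd\n", old_mult);
}

Passing NULL for oldp (or newp) turns the call into a pure write (or pure read), which is what the WRITEONLY()/READONLY() macros in the handlers enforce.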
@@ -1743,14 +1744,14 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;

-	arena = arena_get(tsd, arena_ind, false);
+	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
 	if (arena == NULL) {
 		ret = EFAULT;
 		goto label_return;
 	}

 	if (oldp != NULL && oldlenp != NULL) {
-		size_t oldval = arena_decay_time_get(tsd, arena);
+		size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
 		READ(oldval, ssize_t);
 	}
 	if (newp != NULL) {
@@ -1758,7 +1759,8 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 			ret = EINVAL;
 			goto label_return;
 		}
-		if (arena_decay_time_set(tsd, arena, *(ssize_t *)newp)) {
+		if (arena_decay_time_set(tsd_tsdn(tsd), arena,
+		    *(ssize_t *)newp)) {
 			ret = EFAULT;
 			goto label_return;
 		}
@@ -1777,18 +1779,18 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 	unsigned arena_ind = (unsigned)mib[1];
 	arena_t *arena;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	if (arena_ind < narenas_total_get() && (arena =
-	    arena_get(tsd, arena_ind, false)) != NULL) {
+	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
 		if (newp != NULL) {
 			chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
 			WRITE(new_chunk_hooks, chunk_hooks_t);
-			old_chunk_hooks = chunk_hooks_set(tsd, arena,
+			old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
 			    &new_chunk_hooks);
 			READ(old_chunk_hooks, chunk_hooks_t);
 		} else {
-			chunk_hooks_t old_chunk_hooks = chunk_hooks_get(tsd,
-			    arena);
+			chunk_hooks_t old_chunk_hooks =
+			    chunk_hooks_get(tsd_tsdn(tsd), arena);
 			READ(old_chunk_hooks, chunk_hooks_t);
 		}
 	} else {
@@ -1797,16 +1799,16 @@ arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 	}
 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

 static const ctl_named_node_t *
-arena_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {
 	const ctl_named_node_t *ret;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsdn, &ctl_mtx);
 	if (i > ctl_stats.narenas) {
 		ret = NULL;
 		goto label_return;
@@ -1814,7 +1816,7 @@ arena_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)

 	ret = super_arena_i_node;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsdn, &ctl_mtx);
 	return (ret);
 }

@@ -1827,7 +1829,7 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	int ret;
 	unsigned narenas;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	READONLY();
 	if (*oldlenp != sizeof(unsigned)) {
 		ret = EINVAL;
@@ -1838,7 +1840,7 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,

 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1849,7 +1851,7 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	int ret;
 	unsigned nread, i;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	READONLY();
 	if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
 		ret = EINVAL;
@@ -1864,7 +1866,7 @@ arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1929,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {

 	if (i > NBINS)
@@ -1940,7 +1942,7 @@ arenas_bin_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
-arenas_lrun_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
+arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {

 	if (i > nlclasses)
@@ -1952,7 +1954,7 @@ CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
    size_t)
static const ctl_named_node_t *
-arenas_hchunk_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
+arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {

 	if (i > nhclasses)
@@ -1967,9 +1969,9 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	int ret;
 	unsigned narenas;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
 	READONLY();
-	if (ctl_grow(tsd)) {
+	if (ctl_grow(tsd_tsdn(tsd))) {
 		ret = EAGAIN;
 		goto label_return;
 	}
@@ -1978,7 +1980,7 @@ arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,

 	ret = 0;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
 	return (ret);
 }

@@ -1999,9 +2001,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 			ret = EINVAL;
 			goto label_return;
 		}
-		oldval = prof_thread_active_init_set(tsd, *(bool *)newp);
+		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
+		    *(bool *)newp);
 	} else
-		oldval = prof_thread_active_init_get(tsd);
+		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
 	READ(oldval, bool);

 	ret = 0;
@@ -2024,9 +2027,9 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 			ret = EINVAL;
 			goto label_return;
 		}
-		oldval = prof_active_set(tsd, *(bool *)newp);
+		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
 	} else
-		oldval = prof_active_get(tsd);
+		oldval = prof_active_get(tsd_tsdn(tsd));
 	READ(oldval, bool);

 	ret = 0;
@@ -2072,9 +2075,9 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 			ret = EINVAL;
 			goto label_return;
 		}
-		oldval = prof_gdump_set(tsd, *(bool *)newp);
+		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
 	} else
-		oldval = prof_gdump_get(tsd);
+		oldval = prof_gdump_get(tsd_tsdn(tsd));
 	READ(oldval, bool);

 	ret = 0;
@@ -2097,7 +2100,7 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	if (lg_sample >= (sizeof(uint64_t) << 3))
 		lg_sample = (sizeof(uint64_t) << 3) - 1;

-	prof_reset(tsd, lg_sample);
+	prof_reset(tsd_tsdn(tsd), lg_sample);

 	ret = 0;
label_return:
@@ -2185,7 +2188,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j)
 {

@@ -2204,7 +2207,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
+stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j)
 {

@@ -2224,7 +2227,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
    ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)

static const ctl_named_node_t *
-stats_arenas_i_hchunks_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
+stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
    size_t j)
 {

@@ -2234,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(tsd_t *tsd, const size_t *mib, size_t miblen,
 }

 static const ctl_named_node_t *
-stats_arenas_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {
 	const ctl_named_node_t * ret;

-	malloc_mutex_lock(tsd, &ctl_mtx);
+	malloc_mutex_lock(tsdn, &ctl_mtx);
 	if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
 		ret = NULL;
 		goto label_return;
@@ -2246,6 +2249,6 @@ stats_arenas_i_index(tsd_t *tsd, const size_t *mib, size_t miblen, size_t i)

 	ret = super_stats_arenas_i_node;
label_return:
-	malloc_mutex_unlock(tsd, &ctl_mtx);
+	malloc_mutex_unlock(tsdn, &ctl_mtx);
 	return (ret);
 }
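The ctl_nametomib()/ctl_bymib() pair and the *_index() handlers above back the public MIB interface: translate a dotted name once, then query repeatedly by integer path. A grounded usage example against the stats.arenas.<i>.* subtree:

#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
example_stats_by_mib(unsigned arena_ind)
{
	size_t mib[5], miblen = sizeof(mib) / sizeof(mib[0]);
	size_t allocated, sz = sizeof(allocated);

	if (mallctlnametomib("stats.arenas.0.small.allocated", mib,
	    &miblen) != 0)
		return;
	mib[2] = arena_ind;	/* patch in the arena index */
	if (mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0) == 0)
		printf("arena %u small allocated: %zu\n", arena_ind,
		    allocated);
}

In a long-running process one would normally write the "epoch" node first so that ctl_refresh() (above) repopulates the cached stats snapshot before reading it.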
*/ is_zeroed = zero; - arena = arena_choose(tsd, arena); - if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsd, arena, - usize, alignment, &is_zeroed)) == NULL) { - idalloctm(tsd, node, NULL, true, true); + arena = arena_choose(tsdn_tsd(tsdn), arena); + if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn, + arena, usize, alignment, &is_zeroed)) == NULL) { + idalloctm(tsdn, node, NULL, true, true); return (NULL); } extent_node_init(node, arena, ret, usize, is_zeroed, true); - if (huge_node_set(tsd, ret, node)) { - arena_chunk_dalloc_huge(tsd, arena, ret, usize); - idalloctm(tsd, node, NULL, true, true); + if (huge_node_set(tsdn, ret, node)) { + arena_chunk_dalloc_huge(tsdn, arena, ret, usize); + idalloctm(tsdn, node, NULL, true, true); return (NULL); } /* Insert node into huge. */ - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_elm_new(node, ql_link); ql_tail_insert(&arena->huge, node, ql_link); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed) @@ -94,7 +96,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, } else if (config_fill && unlikely(opt_junk_alloc)) memset(ret, JEMALLOC_ALLOC_JUNK, usize); - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); return (ret); } @@ -103,7 +105,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) #endif static void -huge_dalloc_junk(tsd_t *tsd, void *ptr, size_t usize) +huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize) { if (config_fill && have_dss && unlikely(opt_junk_free)) { @@ -111,7 +113,7 @@ huge_dalloc_junk(tsd_t *tsd, void *ptr, size_t usize) * Only bother junk filling if the chunk isn't about to be * unmapped. */ - if (!config_munmap || (have_dss && chunk_in_dss(tsd, ptr))) + if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr))) memset(ptr, JEMALLOC_FREE_JUNK, usize); } } @@ -122,7 +124,7 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); #endif static void -huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize, +huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { size_t usize, usize_next; @@ -151,22 +153,22 @@ huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize, JEMALLOC_FREE_JUNK, sdiff); post_zeroed = false; } else { - post_zeroed = !chunk_purge_wrapper(tsd, arena, + post_zeroed = !chunk_purge_wrapper(tsdn, arena, &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize, sdiff); } } else post_zeroed = pre_zeroed; - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. */ assert(extent_node_size_get(node) != usize); extent_node_size_set(node, usize); /* Update zeroed. */ extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); - arena_chunk_ralloc_huge_similar(tsd, arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize); /* Fill if necessary (growing). 
*/ if (oldsize < usize) { @@ -183,7 +185,8 @@ huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize, } static bool -huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize) +huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize) { extent_node_t *node; arena_t *arena; @@ -194,7 +197,7 @@ huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize) node = huge_node_get(ptr); arena = extent_node_arena_get(node); pre_zeroed = extent_node_zeroed_get(node); - chunk_hooks = chunk_hooks_get(tsd, arena); + chunk_hooks = chunk_hooks_get(tsdn, arena); assert(oldsize > usize); @@ -207,11 +210,11 @@ huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize) if (oldsize > usize) { size_t sdiff = oldsize - usize; if (config_fill && unlikely(opt_junk_free)) { - huge_dalloc_junk(tsd, (void *)((uintptr_t)ptr + usize), + huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize), sdiff); post_zeroed = false; } else { - post_zeroed = !chunk_purge_wrapper(tsd, arena, + post_zeroed = !chunk_purge_wrapper(tsdn, arena, &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr + usize), CHUNK_CEILING(oldsize), CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff); @@ -219,31 +222,31 @@ huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize) } else post_zeroed = pre_zeroed; - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. */ extent_node_size_set(node, usize); /* Update zeroed. */ extent_node_zeroed_set(node, post_zeroed); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* Zap the excess chunks. */ - arena_chunk_ralloc_huge_shrink(tsd, arena, ptr, oldsize, usize); + arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize); return (false); } static bool -huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize, - bool zero) { +huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize, + size_t usize, bool zero) { extent_node_t *node; arena_t *arena; bool is_zeroed_subchunk, is_zeroed_chunk; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); is_zeroed_subchunk = extent_node_zeroed_get(node); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); /* * Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so @@ -251,14 +254,14 @@ huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize, */ is_zeroed_chunk = zero; - if (arena_chunk_ralloc_huge_expand(tsd, arena, ptr, oldsize, usize, + if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize, &is_zeroed_chunk)) return (true); - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); /* Update the size of the huge allocation. 
*/ extent_node_size_set(node, usize); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); if (zero || (config_fill && unlikely(opt_zero))) { if (!is_zeroed_subchunk) { @@ -279,7 +282,7 @@ huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize, } bool -huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, +huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) { @@ -293,16 +296,16 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) { /* Attempt to expand the allocation in-place. */ - if (!huge_ralloc_no_move_expand(tsd, ptr, oldsize, usize_max, + if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max, zero)) { - arena_decay_tick(tsd, huge_aalloc(ptr)); + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } /* Try again, this time with usize_min. */ if (usize_min < usize_max && CHUNK_CEILING(usize_min) > - CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsd, + CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_min, zero)) { - arena_decay_tick(tsd, huge_aalloc(ptr)); + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } } @@ -313,16 +316,17 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, */ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min) && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) { - huge_ralloc_no_move_similar(tsd, ptr, oldsize, usize_min, + huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min, usize_max, zero); - arena_decay_tick(tsd, huge_aalloc(ptr)); + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } /* Attempt to shrink the allocation in-place. */ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) { - if (!huge_ralloc_no_move_shrink(tsd, ptr, oldsize, usize_max)) { - arena_decay_tick(tsd, huge_aalloc(ptr)); + if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize, + usize_max)) { + arena_decay_tick(tsdn, huge_aalloc(ptr)); return (false); } } @@ -330,18 +334,18 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min, } static void * -huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, +huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero) { if (alignment <= chunksize) - return (huge_malloc(tsd, arena, usize, zero)); - return (huge_palloc(tsd, arena, usize, alignment, zero)); + return (huge_malloc(tsdn, arena, usize, zero)); + return (huge_palloc(tsdn, arena, usize, alignment, zero)); } void * -huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, - size_t alignment, bool zero, tcache_t *tcache) +huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; size_t copysize; @@ -350,7 +354,8 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, assert(usize > 0 && usize <= HUGE_MAXCLASS); /* Try to avoid moving the allocation. */ - if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero)) + if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize, + zero)) return (ptr); /* @@ -358,7 +363,8 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, * different size class. In that case, fall back to allocating new * space and copying. 
*/ - ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero); + ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment, + zero); if (ret == NULL) return (NULL); @@ -369,7 +375,7 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize, } void -huge_dalloc(tsd_t *tsd, void *ptr) +huge_dalloc(tsdn_t *tsdn, void *ptr) { extent_node_t *node; arena_t *arena; @@ -377,17 +383,17 @@ huge_dalloc(tsd_t *tsd, void *ptr) node = huge_node_get(ptr); arena = extent_node_arena_get(node); huge_node_unset(ptr, node); - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); ql_remove(&arena->huge, node, ql_link); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); - huge_dalloc_junk(tsd, extent_node_addr_get(node), + huge_dalloc_junk(tsdn, extent_node_addr_get(node), extent_node_size_get(node)); - arena_chunk_dalloc_huge(tsd, extent_node_arena_get(node), + arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node), extent_node_addr_get(node), extent_node_size_get(node)); - idalloctm(tsd, node, NULL, true, true); + idalloctm(tsdn, node, NULL, true, true); - arena_decay_tick(tsd, arena); + arena_decay_tick(tsdn, arena); } arena_t * @@ -398,7 +404,7 @@ huge_aalloc(const void *ptr) } size_t -huge_salloc(tsd_t *tsd, const void *ptr) +huge_salloc(tsdn_t *tsdn, const void *ptr) { size_t size; extent_node_t *node; @@ -406,15 +412,15 @@ huge_salloc(tsd_t *tsd, const void *ptr) node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); size = extent_node_size_get(node); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); return (size); } prof_tctx_t * -huge_prof_tctx_get(tsd_t *tsd, const void *ptr) +huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr) { prof_tctx_t *tctx; extent_node_t *node; @@ -422,29 +428,29 @@ huge_prof_tctx_get(tsd_t *tsd, const void *ptr) node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); tctx = extent_node_prof_tctx_get(node); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); return (tctx); } void -huge_prof_tctx_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx) +huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { extent_node_t *node; arena_t *arena; node = huge_node_get(ptr); arena = extent_node_arena_get(node); - malloc_mutex_lock(tsd, &arena->huge_mtx); + malloc_mutex_lock(tsdn, &arena->huge_mtx); extent_node_prof_tctx_set(node, tctx); - malloc_mutex_unlock(tsd, &arena->huge_mtx); + malloc_mutex_unlock(tsdn, &arena->huge_mtx); } void -huge_prof_tctx_reset(tsd_t *tsd, const void *ptr) +huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr) { - huge_prof_tctx_set(tsd, ptr, (prof_tctx_t *)(uintptr_t)1U); + huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U); } diff --git a/src/jemalloc.c b/src/jemalloc.c index b1d691ed..40eb2eaa 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -318,15 +318,15 @@ a0ialloc(size_t size, bool zero, bool is_metadata) if (unlikely(malloc_init_a0())) return (NULL); - return (iallocztm(NULL, size, size2index(size), zero, NULL, is_metadata, - arena_get(NULL, 0, true), true)); + return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, + is_metadata, arena_get(TSDN_NULL, 0, true), true)); } static void a0idalloc(void *ptr, bool is_metadata) { 
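	/*
	 * a0 deallocation runs while jemalloc is bootstrapping itself, before
	 * tsd is usable, so a NULL tsdn (TSDN_NULL) is threaded through
	 * idalloctm() here, mirroring the TSDN_NULL arguments in a0ialloc()
	 * above.
	 */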
- idalloctm(NULL, ptr, false, is_metadata, true); + idalloctm(TSDN_NULL, ptr, false, is_metadata, true); } void * @@ -413,7 +413,7 @@ narenas_total_get(void) /* Create a new arena and insert it into the arenas array at index ind. */ static arena_t * -arena_init_locked(tsd_t *tsd, unsigned ind) +arena_init_locked(tsdn_t *tsdn, unsigned ind) { arena_t *arena; @@ -427,26 +427,26 @@ arena_init_locked(tsd_t *tsd, unsigned ind) * Another thread may have already initialized arenas[ind] if it's an * auto arena. */ - arena = arena_get(tsd, ind, false); + arena = arena_get(tsdn, ind, false); if (arena != NULL) { assert(ind < narenas_auto); return (arena); } /* Actually initialize the arena. */ - arena = arena_new(tsd, ind); + arena = arena_new(tsdn, ind); arena_set(ind, arena); return (arena); } arena_t * -arena_init(tsd_t *tsd, unsigned ind) +arena_init(tsdn_t *tsdn, unsigned ind) { arena_t *arena; - malloc_mutex_lock(tsd, &arenas_lock); - arena = arena_init_locked(tsd, ind); - malloc_mutex_unlock(tsd, &arenas_lock); + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind); + malloc_mutex_unlock(tsdn, &arenas_lock); return (arena); } @@ -455,7 +455,7 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - arena = arena_get(tsd, ind, false); + arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_inc(arena, internal); if (tsd_nominal(tsd)) { @@ -471,8 +471,8 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { arena_t *oldarena, *newarena; - oldarena = arena_get(tsd, oldind, false); - newarena = arena_get(tsd, newind, false); + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); arena_nthreads_dec(oldarena, false); arena_nthreads_inc(newarena, false); tsd_arena_set(tsd, newarena); @@ -483,7 +483,7 @@ arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { arena_t *arena; - arena = arena_get(tsd, ind, false); + arena = arena_get(tsd_tsdn(tsd), ind, false); arena_nthreads_dec(arena, internal); if (internal) tsd_iarena_set(tsd, NULL); @@ -588,19 +588,20 @@ arena_choose_hard(tsd_t *tsd, bool internal) choose[j] = 0; first_null = narenas_auto; - malloc_mutex_lock(tsd, &arenas_lock); - assert(arena_get(tsd, 0, false) != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { - if (arena_get(tsd, i, false) != NULL) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ for (j = 0; j < 2; j++) { - if (arena_nthreads_get(arena_get(tsd, i, - false), !!j) < - arena_nthreads_get(arena_get(tsd, - choose[j], false), !!j)) + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) choose[j] = i; } } else if (first_null == narenas_auto) { @@ -618,22 +619,27 @@ arena_choose_hard(tsd_t *tsd, bool internal) } for (j = 0; j < 2; j++) { - if (arena_nthreads_get(arena_get(tsd, choose[j], false), - !!j) == 0 || first_null == narenas_auto) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { /* * Use an unloaded arena, or the least loaded * arena if all arenas are already initialized. */ - if (!!j == internal) - ret = arena_get(tsd, choose[j], false); + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } } else { arena_t *arena; /* Initialize a new arena. 
*/ choose[j] = first_null; - arena = arena_init_locked(tsd, choose[j]); + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j]); if (arena == NULL) { - malloc_mutex_unlock(tsd, &arenas_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); return (NULL); } if (!!j == internal) @@ -641,9 +647,9 @@ arena_choose_hard(tsd_t *tsd, bool internal) } arena_bind(tsd, choose[j], !!j); } - malloc_mutex_unlock(tsd, &arenas_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); } else { - ret = arena_get(tsd, 0, false); + ret = arena_get(tsd_tsdn(tsd), 0, false); arena_bind(tsd, 0, false); arena_bind(tsd, 0, true); } @@ -719,10 +725,10 @@ stats_print_atexit(void) { if (config_tcache && config_stats) { - tsd_t *tsd; + tsdn_t *tsdn; unsigned narenas, i; - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); /* * Merge stats from extant threads. This is racy, since @@ -732,7 +738,7 @@ stats_print_atexit(void) * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arena_get(tsd, i, false); + arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; @@ -742,11 +748,11 @@ stats_print_atexit(void) * and bin locks in the opposite order, * deadlocks may result. */ - malloc_mutex_lock(tsd, &arena->lock); + malloc_mutex_lock(tsdn, &arena->lock); ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tsd, tcache, arena); + tcache_stats_merge(tsdn, tcache, arena); } - malloc_mutex_unlock(tsd, &arena->lock); + malloc_mutex_unlock(tsdn, &arena->lock); } } } @@ -1256,7 +1262,7 @@ malloc_init_hard_needed(void) } static bool -malloc_init_hard_a0_locked(tsd_t **tsd) +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; @@ -1283,7 +1289,7 @@ malloc_init_hard_a0_locked(tsd_t **tsd) prof_boot1(); if (arena_boot()) return (true); - if (config_tcache && tcache_boot(*tsd)) + if (config_tcache && tcache_boot(TSDN_NULL)) return (true); if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) return (true); @@ -1299,15 +1305,7 @@ malloc_init_hard_a0_locked(tsd_t **tsd) * Initialize one arena here. The rest are lazily created in * arena_choose_hard(). */ - if (arena_init(*tsd, 0) == NULL) - return (true); - - /* - * Initialize tsd, since some code paths cause chunk allocation, which - * in turn depends on tsd. - */ - *tsd = malloc_tsd_boot0(); - if (*tsd == NULL) + if (arena_init(TSDN_NULL, 0) == NULL) return (true); malloc_init_state = malloc_init_a0_initialized; @@ -1319,21 +1317,19 @@ static bool malloc_init_hard_a0(void) { bool ret; - tsd_t *tsd = NULL; - malloc_mutex_lock(tsd, &init_lock); - ret = malloc_init_hard_a0_locked(&tsd); - malloc_mutex_unlock(tsd, &init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (ret); } /* Initialize data structures which may trigger recursive allocation. 
*/ static bool -malloc_init_hard_recursible(tsd_t *tsd) +malloc_init_hard_recursible(void) { malloc_init_state = malloc_init_recursible; - malloc_mutex_unlock(tsd, &init_lock); ncpus = malloc_ncpus(); @@ -1345,17 +1341,15 @@ malloc_init_hard_recursible(tsd_t *tsd) malloc_write(": Error in pthread_atfork()\n"); if (opt_abort) abort(); - malloc_mutex_lock(tsd, &init_lock); return (true); } #endif - malloc_mutex_lock(tsd, &init_lock); return (false); } static bool -malloc_init_hard_finish(tsd_t *tsd) +malloc_init_hard_finish(tsdn_t *tsdn) { if (malloc_mutex_boot()) @@ -1383,7 +1377,7 @@ malloc_init_hard_finish(tsd_t *tsd) narenas_total_set(narenas_auto); /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(tsd, sizeof(arena_t *) * + arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * (MALLOCX_ARENA_MAX+1)); if (arenas == NULL) return (true); @@ -1399,39 +1393,43 @@ malloc_init_hard_finish(tsd_t *tsd) static bool malloc_init_hard(void) { - tsd_t *tsd = NULL; + tsd_t *tsd; #if defined(_WIN32) && _WIN32_WINNT < 0x0600 _init_init_lock(); #endif - malloc_mutex_lock(tsd, &init_lock); + malloc_mutex_lock(TSDN_NULL, &init_lock); if (!malloc_init_hard_needed()) { - malloc_mutex_unlock(tsd, &init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (false); } if (malloc_init_state != malloc_init_a0_initialized && - malloc_init_hard_a0_locked(&tsd)) { - malloc_mutex_unlock(tsd, &init_lock); + malloc_init_hard_a0_locked()) { + malloc_mutex_unlock(TSDN_NULL, &init_lock); return (true); } - if (malloc_init_hard_recursible(tsd)) { - malloc_mutex_unlock(tsd, &init_lock); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) + return (true); + if (malloc_init_hard_recursible()) + return (true); + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + + if (config_prof && prof_boot2(tsd_tsdn(tsd))) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); return (true); } - if (config_prof && prof_boot2(tsd)) { - malloc_mutex_unlock(tsd, &init_lock); + if (malloc_init_hard_finish(tsd_tsdn(tsd))) { + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); return (true); } - if (malloc_init_hard_finish(tsd)) { - malloc_mutex_unlock(tsd, &init_lock); - return (true); - } - - malloc_mutex_unlock(tsd, &init_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); malloc_tsd_boot1(); return (false); } @@ -1457,7 +1455,7 @@ ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(tsd, p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = ialloc(tsd, usize, ind, zero, slow_path); @@ -1479,7 +1477,7 @@ ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(tsd, p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); return (p); } @@ -1487,19 +1485,24 @@ ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) /* * ialloc_body() is inlined so that fast and slow paths are generated separately * with statically known slow_path. + * + * This function guarantees that *tsdn is non-NULL on success. 
*/ JEMALLOC_ALWAYS_INLINE_C void * -ialloc_body(size_t size, bool zero, tsd_t **tsd, size_t *usize, bool slow_path) +ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, + bool slow_path) { + tsd_t *tsd; szind_t ind; if (slow_path && unlikely(malloc_init())) { - *tsd = NULL; + *tsdn = NULL; return (NULL); } - *tsd = tsd_fetch(); - witness_assert_lockless(*tsd); + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); ind = size2index(size); if (unlikely(ind >= NSIZES)) @@ -1512,16 +1515,18 @@ ialloc_body(size_t size, bool zero, tsd_t **tsd, size_t *usize, bool slow_path) } if (config_prof && opt_prof) - return (ialloc_prof(*tsd, *usize, ind, zero, slow_path)); + return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); - return (ialloc(*tsd, size, ind, zero, slow_path)); + return (ialloc(tsd, size, ind, zero, slow_path)); } JEMALLOC_ALWAYS_INLINE_C void -ialloc_post_check(void *ret, tsd_t *tsd, size_t usize, const char *func, +ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func, bool update_errno, bool slow_path) { + assert(!tsdn_null(tsdn) || ret == NULL); + if (unlikely(ret == NULL)) { if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { malloc_printf(": Error in %s(): out of " @@ -1532,10 +1537,10 @@ ialloc_post_check(void *ret, tsd_t *tsd, size_t usize, const char *func, set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(tsd, ret, config_prof)); - *tsd_thread_allocatedp_get(tsd) += usize; + assert(usize == isalloc(tsdn, ret, config_prof)); + *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; } - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -1544,20 +1549,20 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_malloc(size_t size) { void *ret; - tsd_t *tsd; + tsdn_t *tsdn; size_t usize JEMALLOC_CC_SILENCE_INIT(0); if (size == 0) size = 1; if (likely(!malloc_slow)) { - ret = ialloc_body(size, false, &tsd, &usize, false); - ialloc_post_check(ret, tsd, usize, "malloc", true, false); + ret = ialloc_body(size, false, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "malloc", true, false); } else { - ret = ialloc_body(size, false, &tsd, &usize, true); - ialloc_post_check(ret, tsd, usize, "malloc", true, true); + ret = ialloc_body(size, false, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "malloc", true, true); UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsd, ret, usize, false); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); } return (ret); @@ -1576,7 +1581,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); if (p == NULL) return (NULL); - arena_prof_promoted(tsd, p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = ipalloc(tsd, usize, alignment, false); @@ -1598,7 +1603,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(tsd, p, usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, usize, tctx); return (p); } @@ -1620,7 +1625,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) goto label_oom; } tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); if (size == 0) size = 1; @@ -1655,12 +1660,13 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) ret = 0; label_return: if (config_stats && 
likely(result != NULL)) { - assert(usize == isalloc(tsd, result, config_prof)); + assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); *tsd_thread_allocatedp_get(tsd) += usize; } UTRACE(0, size, result); - JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd, result, usize, false); - witness_assert_lockless(tsd); + JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, + false); + witness_assert_lockless(tsd_tsdn(tsd)); return (ret); label_oom: assert(result == NULL); @@ -1670,7 +1676,7 @@ label_oom: abort(); } ret = ENOMEM; - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); goto label_return; } @@ -1707,7 +1713,7 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) je_calloc(size_t num, size_t size) { void *ret; - tsd_t *tsd; + tsdn_t *tsdn; size_t num_size; size_t usize JEMALLOC_CC_SILENCE_INIT(0); @@ -1727,13 +1733,13 @@ je_calloc(size_t num, size_t size) num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */ if (likely(!malloc_slow)) { - ret = ialloc_body(num_size, true, &tsd, &usize, false); - ialloc_post_check(ret, tsd, usize, "calloc", true, false); + ret = ialloc_body(num_size, true, &tsdn, &usize, false); + ialloc_post_check(ret, tsdn, usize, "calloc", true, false); } else { - ret = ialloc_body(num_size, true, &tsd, &usize, true); - ialloc_post_check(ret, tsd, usize, "calloc", true, true); + ret = ialloc_body(num_size, true, &tsdn, &usize, true); + ialloc_post_check(ret, tsdn, usize, "calloc", true, true); UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsd, ret, usize, false); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); } return (ret); @@ -1751,7 +1757,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); if (p == NULL) return (NULL); - arena_prof_promoted(tsd, p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); @@ -1766,7 +1772,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd, old_ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); tctx = prof_alloc_prep(tsd, usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); @@ -1788,16 +1794,16 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) size_t usize; UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); if (config_prof && opt_prof) { - usize = isalloc(tsd, ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); prof_free(tsd, ptr, usize); } else if (config_stats || config_valgrind) - usize = isalloc(tsd, ptr, config_prof); + usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; @@ -1805,7 +1811,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) iqalloc(tsd, ptr, tcache, false); else { if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(tsd, ptr); + rzsize = p2rz(tsd_tsdn(tsd), ptr); iqalloc(tsd, ptr, tcache, true); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } @@ -1816,7 +1822,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - 
witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); @@ -1826,7 +1832,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) if (config_stats) *tsd_thread_deallocatedp_get(tsd) += usize; if (config_valgrind && unlikely(in_valgrind)) - rzsize = p2rz(tsd, ptr); + rzsize = p2rz(tsd_tsdn(tsd), ptr); isqalloc(tsd, ptr, usize, tcache, slow_path); JEMALLOC_VALGRIND_FREE(ptr, rzsize); } @@ -1837,13 +1843,15 @@ JEMALLOC_ALLOC_SIZE(2) je_realloc(void *ptr, size_t size) { void *ret; - tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL); + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); if (unlikely(size == 0)) { if (ptr != NULL) { + tsd_t *tsd; + /* realloc(ptr, 0) is equivalent to free(ptr). */ UTRACE(ptr, 0, 0); tsd = tsd_fetch(); @@ -1854,14 +1862,17 @@ je_realloc(void *ptr, size_t size) } if (likely(ptr != NULL)) { + tsd_t *tsd; + assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); - witness_assert_lockless(tsd); - old_usize = isalloc(tsd, ptr, config_prof); + witness_assert_lockless(tsd_tsdn(tsd)); + + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_valgrind && unlikely(in_valgrind)) { - old_rzsize = config_prof ? p2rz(tsd, ptr) : + old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : u2rz(old_usize); } @@ -1875,12 +1886,14 @@ je_realloc(void *ptr, size_t size) usize = s2u(size); ret = iralloc(tsd, ptr, old_usize, size, 0, false); } + tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). */ if (likely(!malloc_slow)) - ret = ialloc_body(size, false, &tsd, &usize, false); + ret = ialloc_body(size, false, &tsdn, &usize, false); else - ret = ialloc_body(size, false, &tsd, &usize, true); + ret = ialloc_body(size, false, &tsdn, &usize, true); + assert(!tsdn_null(tsdn) || ret == NULL); } if (unlikely(ret == NULL)) { @@ -1892,14 +1905,17 @@ je_realloc(void *ptr, size_t size) set_errno(ENOMEM); } if (config_stats && likely(ret != NULL)) { - assert(usize == isalloc(tsd, ret, config_prof)); + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret, config_prof)); + tsd = tsdn_tsd(tsdn); *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(true, tsd, ret, usize, true, ptr, old_usize, + JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize, old_rzsize, true, false); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); return (ret); } @@ -1910,12 +1926,12 @@ je_free(void *ptr) UTRACE(ptr, 0, 0); if (likely(ptr != NULL)) { tsd_t *tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); if (likely(!malloc_slow)) ifree(tsd, ptr, tcache_get(tsd, false), false); else ifree(tsd, ptr, tcache_get(tsd, false), true); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); } } @@ -2012,7 +2028,7 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, *tcache = tcache_get(tsd, true); if ((flags & MALLOCX_ARENA_MASK) != 0) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - *arena = arena_get(tsd, arena_ind, true); + *arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(*arena == NULL)) return (true); } else @@ -2021,21 +2037,21 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize, } 
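/*
 * Illustrative sketch (assumed definitions, not part of this patch): the
 * tsd/tsdn conversion pattern that the surrounding hunks rely on.  Public
 * entry points fetch a non-nullable tsd_t and hand tsd_tsdn(tsd) to internal
 * helpers; a helper that genuinely needs thread state converts back with
 * tsdn_tsd(), which is only valid on a non-NULL tsdn (e.g. huge_palloc()
 * above calls arena_choose(tsdn_tsd(tsdn), arena)).  The bodies below are
 * simplified guesses consistent with how the calls are used in this diff.
 */
JEMALLOC_ALWAYS_INLINE_C tsdn_t *
tsd_tsdn(tsd_t *tsd)
{

	/* Always-safe conversion; a tsd_t is also a valid tsdn_t. */
	return ((tsdn_t *)tsd);
}

JEMALLOC_ALWAYS_INLINE_C bool
tsdn_null(const tsdn_t *tsdn)
{

	return (tsdn == NULL);
}

JEMALLOC_ALWAYS_INLINE_C tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{

	/* Narrowing conversion; asserts rather than returning NULL. */
	assert(!tsdn_null(tsdn));
	return (&tsdn->tsd);
}

JEMALLOC_ALWAYS_INLINE_C tsdn_t *
tsdn_fetch(void)
{

	/*
	 * Unlike tsd_fetch(), callable before tsd bootstrapping completes,
	 * as in stats_print_atexit() and je_sallocx(); returns TSDN_NULL
	 * until tsd is booted (the tsd_booted_get() probe is an assumption
	 * of this sketch).
	 */
	if (!tsd_booted_get())
		return (NULL);
	return (tsd_tsdn(tsd_fetch()));
}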
JEMALLOC_ALWAYS_INLINE_C void * -imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, bool slow_path) { szind_t ind; if (unlikely(alignment != 0)) - return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); + return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); ind = size2index(usize); assert(ind < NSIZES); - return (iallocztm(tsd, usize, ind, zero, tcache, false, arena, + return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena, slow_path)); } static void * -imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, +imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, bool slow_path) { void *p; @@ -2043,13 +2059,13 @@ imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero, if (usize <= SMALL_MAXCLASS) { assert(((alignment == 0) ? s2u(LARGE_MINCLASS) : sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS); - p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache, - arena, slow_path); + p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero, + tcache, arena, slow_path); if (p == NULL) return (NULL); - arena_prof_promoted(tsd, p, usize); + arena_prof_promoted(tsdn, p, usize); } else { - p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena, + p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena, slow_path); } @@ -2070,19 +2086,19 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path) &zero, &tcache, &arena))) return (NULL); tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true); - if (likely((uintptr_t)tctx == (uintptr_t)1U)) - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena, - slow_path); - else if ((uintptr_t)tctx > (uintptr_t)1U) { - p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache, - arena, slow_path); + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero, + tcache, arena, slow_path); } else p = NULL; if (unlikely(p == NULL)) { prof_alloc_rollback(tsd, tctx, true); return (NULL); } - prof_malloc(tsd, p, *usize, tctx); + prof_malloc(tsd_tsdn(tsd), p, *usize, tctx); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); @@ -2101,24 +2117,27 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment, &zero, &tcache, &arena))) return (NULL); - p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena, - slow_path); + p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache, + arena, slow_path); assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); return (p); } +/* This function guarantees that *tsdn is non-NULL on success. 
*/ JEMALLOC_ALWAYS_INLINE_C void * -imallocx_body(size_t size, int flags, tsd_t **tsd, size_t *usize, +imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize, bool slow_path) { + tsd_t *tsd; if (slow_path && unlikely(malloc_init())) { - *tsd = NULL; + *tsdn = NULL; return (NULL); } - *tsd = tsd_fetch(); - witness_assert_lockless(*tsd); + tsd = tsd_fetch(); + *tsdn = tsd_tsdn(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); if (likely(flags == 0)) { szind_t ind = size2index(size); @@ -2131,17 +2150,17 @@ imallocx_body(size_t size, int flags, tsd_t **tsd, size_t *usize, } if (config_prof && opt_prof) { - return (ialloc_prof(*tsd, *usize, ind, false, + return (ialloc_prof(tsd, *usize, ind, false, slow_path)); } - return (ialloc(*tsd, size, ind, false, slow_path)); + return (ialloc(tsd, size, ind, false, slow_path)); } if (config_prof && opt_prof) - return (imallocx_prof(*tsd, size, flags, usize, slow_path)); + return (imallocx_prof(tsd, size, flags, usize, slow_path)); - return (imallocx_no_prof(*tsd, size, flags, usize, slow_path)); + return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); } JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN @@ -2149,20 +2168,20 @@ void JEMALLOC_NOTHROW * JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) je_mallocx(size_t size, int flags) { - tsd_t *tsd; + tsdn_t *tsdn; void *p; size_t usize; assert(size != 0); if (likely(!malloc_slow)) { - p = imallocx_body(size, flags, &tsd, &usize, false); - ialloc_post_check(p, tsd, usize, "mallocx", false, false); + p = imallocx_body(size, flags, &tsdn, &usize, false); + ialloc_post_check(p, tsdn, usize, "mallocx", false, false); } else { - p = imallocx_body(size, flags, &tsd, &usize, true); - ialloc_post_check(p, tsd, usize, "mallocx", false, true); + p = imallocx_body(size, flags, &tsdn, &usize, true); + ialloc_post_check(p, tsdn, usize, "mallocx", false, true); UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(p != NULL, tsd, p, usize, + JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, MALLOCX_ZERO_GET(flags)); } @@ -2183,7 +2202,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, zero, tcache, arena); if (p == NULL) return (NULL); - arena_prof_promoted(tsd, p, usize); + arena_prof_promoted(tsd_tsdn(tsd), p, usize); } else { p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero, tcache, arena); @@ -2202,7 +2221,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd, old_ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); tctx = prof_alloc_prep(tsd, *usize, prof_active, true); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize, @@ -2225,7 +2244,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. 
*/ - *usize = isalloc(tsd, p, config_prof); + *usize = isalloc(tsd_tsdn(tsd), p, config_prof); } prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr, old_usize, old_tctx); @@ -2253,11 +2272,11 @@ je_rallocx(void *ptr, size_t size, int flags) assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { unsigned arena_ind = MALLOCX_ARENA_GET(flags); - arena = arena_get(tsd, arena_ind, true); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); if (unlikely(arena == NULL)) goto label_oom; } else @@ -2271,7 +2290,7 @@ je_rallocx(void *ptr, size_t size, int flags) } else tcache = tcache_get(tsd, true); - old_usize = isalloc(tsd, ptr, config_prof); + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); if (config_valgrind && unlikely(in_valgrind)) old_rzsize = u2rz(old_usize); @@ -2289,7 +2308,7 @@ je_rallocx(void *ptr, size_t size, int flags) if (unlikely(p == NULL)) goto label_oom; if (config_stats || (config_valgrind && unlikely(in_valgrind))) - usize = isalloc(tsd, p, config_prof); + usize = isalloc(tsd_tsdn(tsd), p, config_prof); } assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); @@ -2298,9 +2317,9 @@ je_rallocx(void *ptr, size_t size, int flags) *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(true, tsd, p, usize, false, ptr, old_usize, - old_rzsize, false, zero); - witness_assert_lockless(tsd); + JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr, + old_usize, old_rzsize, false, zero); + witness_assert_lockless(tsd_tsdn(tsd)); return (p); label_oom: if (config_xmalloc && unlikely(opt_xmalloc)) { @@ -2308,32 +2327,32 @@ label_oom: abort(); } UTRACE(ptr, size, 0); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); return (NULL); } JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero)) + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) return (old_usize); - usize = isalloc(tsd, ptr, config_prof); + usize = isalloc(tsdn, ptr, config_prof); return (usize); } static size_t -ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; if (tctx == NULL) return (old_usize); - usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment, + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, zero); return (usize); @@ -2348,7 +2367,7 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, prof_tctx_t *old_tctx, *tctx; prof_active = prof_active_get_unlocked(); - old_tctx = prof_tctx_get(tsd, ptr); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); /* * usize isn't knowable before ixalloc() returns when extra is non-zero. 
* Therefore, compute its maximum possible value and use that in @@ -2373,11 +2392,11 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { - usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra, - alignment, zero, tctx); + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); } else { - usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, - alignment, zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (usize == old_usize) { prof_alloc_rollback(tsd, tctx, false); @@ -2404,9 +2423,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); - old_usize = isalloc(tsd, ptr, config_prof); + old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); /* * The API explicitly absolves itself of protecting against (size + @@ -2431,8 +2450,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, alignment, zero); } else { - usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, - alignment, zero); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } if (unlikely(usize == old_usize)) goto label_not_resized; @@ -2441,11 +2460,11 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) *tsd_thread_allocatedp_get(tsd) += usize; *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(false, tsd, ptr, usize, false, ptr, old_usize, - old_rzsize, false, zero); + JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr, + old_usize, old_rzsize, false, zero); label_not_resized: UTRACE(ptr, size, ptr); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); return (usize); } @@ -2454,20 +2473,20 @@ JEMALLOC_ATTR(pure) je_sallocx(const void *ptr, int flags) { size_t usize; - tsd_t *tsd; + tsdn_t *tsdn; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); - tsd = tsd_fetch(); - witness_assert_lockless(tsd); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); if (config_ivsalloc) - usize = ivsalloc(tsd, ptr, config_prof); + usize = ivsalloc(tsdn, ptr, config_prof); else - usize = isalloc(tsd, ptr, config_prof); + usize = isalloc(tsdn, ptr, config_prof); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); return (usize); } @@ -2481,7 +2500,7 @@ je_dallocx(void *ptr, int flags) assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; @@ -2495,21 +2514,21 @@ je_dallocx(void *ptr, int flags) ifree(tsd, ptr, tcache, false); else ifree(tsd, ptr, tcache, true); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_ALWAYS_INLINE_C size_t -inallocx(tsd_t *tsd, size_t size, int flags) +inallocx(tsdn_t *tsdn, size_t size, int flags) { size_t usize; - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) usize = s2u(size); else usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); - witness_assert_lockless(tsd); + 
witness_assert_lockless(tsdn); return (usize); } @@ -2523,10 +2542,10 @@ je_sdallocx(void *ptr, size_t size, int flags) assert(ptr != NULL); assert(malloc_initialized() || IS_INITIALIZER); tsd = tsd_fetch(); - usize = inallocx(tsd, size, flags); - assert(usize == isalloc(tsd, ptr, config_prof)); + usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof)); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) tcache = NULL; @@ -2540,7 +2559,7 @@ je_sdallocx(void *ptr, size_t size, int flags) isfree(tsd, ptr, usize, tcache, false); else isfree(tsd, ptr, usize, tcache, true); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @@ -2548,21 +2567,21 @@ JEMALLOC_ATTR(pure) je_nallocx(size_t size, int flags) { size_t usize; - tsd_t *tsd; + tsdn_t *tsdn; assert(size != 0); if (unlikely(malloc_init())) return (0); - tsd = tsd_fetch(); - witness_assert_lockless(tsd); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); - usize = inallocx(tsd, size, flags); + usize = inallocx(tsdn, size, flags); if (unlikely(usize > HUGE_MAXCLASS)) return (0); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); return (usize); } @@ -2577,9 +2596,9 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, return (EAGAIN); tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); return (ret); } @@ -2587,15 +2606,15 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { int ret; - tsd_t *tsd; + tsdn_t *tsdn; if (unlikely(malloc_init())) return (EAGAIN); - tsd = tsd_fetch(); - witness_assert_lockless(tsd); - ret = ctl_nametomib(tsd, name, mibp, miblenp); - witness_assert_lockless(tsd); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); + ret = ctl_nametomib(tsdn, name, mibp, miblenp); + witness_assert_lockless(tsdn); return (ret); } @@ -2610,9 +2629,9 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, return (EAGAIN); tsd = tsd_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); - witness_assert_lockless(tsd); + witness_assert_lockless(tsd_tsdn(tsd)); return (ret); } @@ -2620,32 +2639,32 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts) { - tsd_t *tsd; + tsdn_t *tsdn; - tsd = tsd_fetch(); - witness_assert_lockless(tsd); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); stats_print(write_cb, cbopaque, opts); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); } JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; - tsd_t *tsd; + tsdn_t *tsdn; assert(malloc_initialized() || IS_INITIALIZER); malloc_thread_init(); - tsd = tsd_fetch(); - witness_assert_lockless(tsd); + tsdn = tsdn_fetch(); + witness_assert_lockless(tsdn); if (config_ivsalloc) - ret = ivsalloc(tsd, ptr, config_prof); + ret = ivsalloc(tsdn, ptr, config_prof); else - ret = (ptr == NULL) ? 0 : isalloc(tsd, ptr, config_prof); + ret = (ptr == NULL) ? 
0 : isalloc(tsdn, ptr, config_prof); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); return (ret); } @@ -2705,28 +2724,35 @@ _malloc_prefork(void) witness_prefork(tsd); /* Acquire all mutexes in a safe order. */ - ctl_prefork(tsd); - malloc_mutex_prefork(tsd, &arenas_lock); - prof_prefork0(tsd); + ctl_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + prof_prefork0(tsd_tsdn(tsd)); for (i = 0; i < 3; i++) { for (j = 0; j < narenas; j++) { - if ((arena = arena_get(tsd, j, false)) != NULL) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { switch (i) { - case 0: arena_prefork0(tsd, arena); break; - case 1: arena_prefork1(tsd, arena); break; - case 2: arena_prefork2(tsd, arena); break; + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; default: not_reached(); } } } } - base_prefork(tsd); - chunk_prefork(tsd); + base_prefork(tsd_tsdn(tsd)); + chunk_prefork(tsd_tsdn(tsd)); for (i = 0; i < narenas; i++) { - if ((arena = arena_get(tsd, i, false)) != NULL) - arena_prefork3(tsd, arena); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_prefork3(tsd_tsdn(tsd), arena); } - prof_prefork1(tsd); + prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB @@ -2750,17 +2776,17 @@ _malloc_postfork(void) witness_postfork_parent(tsd); /* Release all mutexes, now that fork() has completed. */ - chunk_postfork_parent(tsd); - base_postfork_parent(tsd); + chunk_postfork_parent(tsd_tsdn(tsd)); + base_postfork_parent(tsd_tsdn(tsd)); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - if ((arena = arena_get(tsd, i, false)) != NULL) - arena_postfork_parent(tsd, arena); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_parent(tsd_tsdn(tsd), arena); } - prof_postfork_parent(tsd); - malloc_mutex_postfork_parent(tsd, &arenas_lock); - ctl_postfork_parent(tsd); + prof_postfork_parent(tsd_tsdn(tsd)); + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + ctl_postfork_parent(tsd_tsdn(tsd)); } void @@ -2775,17 +2801,17 @@ jemalloc_postfork_child(void) witness_postfork_child(tsd); /* Release all mutexes, now that fork() has completed. 
*/ - chunk_postfork_child(tsd); - base_postfork_child(tsd); + chunk_postfork_child(tsd_tsdn(tsd)); + base_postfork_child(tsd_tsdn(tsd)); for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { arena_t *arena; - if ((arena = arena_get(tsd, i, false)) != NULL) - arena_postfork_child(tsd, arena); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) + arena_postfork_child(tsd_tsdn(tsd), arena); } - prof_postfork_child(tsd); - malloc_mutex_postfork_child(tsd, &arenas_lock); - ctl_postfork_child(tsd); + prof_postfork_child(tsd_tsdn(tsd)); + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/ diff --git a/src/mutex.c b/src/mutex.c index 4174f42e..a1fac342 100644 --- a/src/mutex.c +++ b/src/mutex.c @@ -109,25 +109,25 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) } void -malloc_mutex_prefork(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { - malloc_mutex_lock(tsd, mutex); + malloc_mutex_lock(tsdn, mutex); } void -malloc_mutex_postfork_parent(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { - malloc_mutex_unlock(tsd, mutex); + malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(tsd_t *tsd, malloc_mutex_t *mutex) +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(tsd, mutex); + malloc_mutex_unlock(tsdn, mutex); #else if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank)) { diff --git a/src/prof.c b/src/prof.c index df7f1f9b..c1f58d46 100644 --- a/src/prof.c +++ b/src/prof.c @@ -121,13 +121,13 @@ static bool prof_booted = false; * definition. */ -static bool prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); -static bool prof_tdata_should_destroy(tsd_t *tsd, prof_tdata_t *tdata, +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); -static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, +static void prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached); -static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); /******************************************************************************/ /* Red-black trees. 
*/ @@ -213,23 +213,23 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) } if ((uintptr_t)tctx > (uintptr_t)1U) { - malloc_mutex_lock(tsd, tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); tctx->prepared = false; - if (prof_tctx_should_destroy(tsd, tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) prof_tctx_destroy(tsd, tctx); else - malloc_mutex_unlock(tsd, tctx->tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } } void -prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize, +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { - prof_tctx_set(tsd, ptr, usize, tctx); + prof_tctx_set(tsdn, ptr, usize, tctx); - malloc_mutex_lock(tsd, tctx->tdata->lock); + malloc_mutex_lock(tsdn, tctx->tdata->lock); tctx->cnts.curobjs++; tctx->cnts.curbytes += usize; if (opt_prof_accum) { @@ -237,23 +237,23 @@ prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize, tctx->cnts.accumbytes += usize; } tctx->prepared = false; - malloc_mutex_unlock(tsd, tctx->tdata->lock); + malloc_mutex_unlock(tsdn, tctx->tdata->lock); } void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { - malloc_mutex_lock(tsd, tctx->tdata->lock); + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); assert(tctx->cnts.curobjs > 0); assert(tctx->cnts.curbytes >= usize); tctx->cnts.curobjs--; tctx->cnts.curbytes -= usize; - if (prof_tctx_should_destroy(tsd, tctx)) + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) prof_tctx_destroy(tsd, tctx); else - malloc_mutex_unlock(tsd, tctx->tdata->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); } void @@ -278,7 +278,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata) tdata->enq = true; } - malloc_mutex_lock(tsd, &bt2gctx_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); } JEMALLOC_INLINE_C void @@ -288,7 +288,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) cassert(config_prof); assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(tsd, &bt2gctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); if (tdata != NULL) { bool idump, gdump; @@ -301,9 +301,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata) tdata->enq_gdump = false; if (idump) - prof_idump(tsd); + prof_idump(tsd_tsdn(tsd)); if (gdump) - prof_gdump(tsd); + prof_gdump(tsd_tsdn(tsd)); } } @@ -547,14 +547,14 @@ prof_tdata_mutex_choose(uint64_t thr_uid) } static prof_gctx_t * -prof_gctx_create(tsd_t *tsd, prof_bt_t *bt) +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { /* * Create a single allocation that has space for vec of length bt->len. */ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); - prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size, - size2index(size), false, NULL, true, arena_get(NULL, 0, true), + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), true); if (gctx == NULL) return (NULL); @@ -587,32 +587,32 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, * into this function. */ prof_enter(tsd, tdata_self); - malloc_mutex_lock(tsd, gctx->lock); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); assert(gctx->nlimbo != 0); if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { /* Remove gctx from bt2gctx. */ - if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) + if (ckh_remove(tsd_tsdn(tsd), &bt2gctx, &gctx->bt, NULL, NULL)) not_reached(); prof_leave(tsd, tdata_self); /* Destroy gctx. 
@@ -587,32 +587,32 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
 	 * into this function.
 	 */
 	prof_enter(tsd, tdata_self);
-	malloc_mutex_lock(tsd, gctx->lock);
+	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
 	assert(gctx->nlimbo != 0);
 	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
 		/* Remove gctx from bt2gctx. */
-		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
+		if (ckh_remove(tsd_tsdn(tsd), &bt2gctx, &gctx->bt, NULL, NULL))
 			not_reached();
 		prof_leave(tsd, tdata_self);
 		/* Destroy gctx. */
-		malloc_mutex_unlock(tsd, gctx->lock);
-		idalloctm(tsd, gctx, NULL, true, true);
+		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+		idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
 	} else {
 		/*
 		 * Compensate for increment in prof_tctx_destroy() or
 		 * prof_lookup().
 		 */
 		gctx->nlimbo--;
-		malloc_mutex_unlock(tsd, gctx->lock);
+		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
 		prof_leave(tsd, tdata_self);
 	}
 }
 
 static bool
-prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx)
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
 {
 
-	malloc_mutex_assert_owner(tsd, tctx->tdata->lock);
+	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
 
 	if (opt_prof_accum)
 		return (false);
@@ -643,7 +643,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 	prof_gctx_t *gctx = tctx->gctx;
 	bool destroy_tdata, destroy_tctx, destroy_gctx;
 
-	malloc_mutex_assert_owner(tsd, tctx->tdata->lock);
+	malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
 
 	assert(tctx->cnts.curobjs == 0);
 	assert(tctx->cnts.curbytes == 0);
@@ -651,11 +651,11 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 	assert(tctx->cnts.accumobjs == 0);
 	assert(tctx->cnts.accumbytes == 0);
 
-	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
-	destroy_tdata = prof_tdata_should_destroy(tsd, tdata, false);
-	malloc_mutex_unlock(tsd, tdata->lock);
+	ckh_remove(tsd_tsdn(tsd), &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+	destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
+	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
 
-	malloc_mutex_lock(tsd, gctx->lock);
+	malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
 	switch (tctx->state) {
 	case prof_tctx_state_nominal:
 		tctx_tree_remove(&gctx->tctxs, tctx);
@@ -695,19 +695,19 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
 		destroy_tctx = false;
 		destroy_gctx = false;
 	}
-	malloc_mutex_unlock(tsd, gctx->lock);
+	malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
 	if (destroy_gctx) {
 		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
 		    tdata);
 	}
 
-	malloc_mutex_assert_not_owner(tsd, tctx->tdata->lock);
+	malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
 
 	if (destroy_tdata)
-		prof_tdata_destroy(tsd, tdata, false);
+		prof_tdata_destroy(tsd_tsdn(tsd), tdata, false);
 
 	if (destroy_tctx)
-		idalloctm(tsd, tctx, NULL, true, true);
+		idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
 }
 
 static bool
@@ -727,16 +727,16 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
 	prof_enter(tsd, tdata);
 	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
 		/* bt has never been seen before.  Insert it. */
-		gctx.p = prof_gctx_create(tsd, bt);
+		gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
 		if (gctx.v == NULL) {
 			prof_leave(tsd, tdata);
 			return (true);
 		}
 		btkey.p = &gctx.p->bt;
-		if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+		if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) {
 			/* OOM. */
 			prof_leave(tsd, tdata);
-			idalloctm(tsd, gctx.v, NULL, true, true);
+			idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
 			return (true);
 		}
 		new_gctx = true;
@@ -745,9 +745,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
 		 * Increment nlimbo, in order to avoid a race condition with
 		 * prof_tctx_destroy()/prof_gctx_try_destroy().
 		 */
-		malloc_mutex_lock(tsd, gctx.p->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
 		gctx.p->nlimbo++;
-		malloc_mutex_unlock(tsd, gctx.p->lock);
+		malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
 		new_gctx = false;
 	}
 	prof_leave(tsd, tdata);
@@ -774,11 +774,11 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 	if (tdata == NULL)
 		return (NULL);
 
-	malloc_mutex_lock(tsd, tdata->lock);
+	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
 	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
 	if (!not_found) /* Note double negative! */
 		ret.p->prepared = true;
-	malloc_mutex_unlock(tsd, tdata->lock);
+	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
 	if (not_found) {
 		void *btkey;
 		prof_gctx_t *gctx;
@@ -793,9 +793,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 			return (NULL);
 
 		/* Link a prof_tctx_t into gctx for this thread. */
-		ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
+		ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
 		    size2index(sizeof(prof_tctx_t)), false, NULL, true,
-		    arena_ichoose(tsd, NULL), true);
+		    arena_ichoose(tsd_tsdn(tsd), NULL), true);
 		if (ret.p == NULL) {
 			if (new_gctx)
 				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
@@ -809,20 +809,21 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
 		ret.p->tctx_uid = tdata->tctx_uid_next++;
 		ret.p->prepared = true;
 		ret.p->state = prof_tctx_state_initializing;
-		malloc_mutex_lock(tsd, tdata->lock);
-		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
-		malloc_mutex_unlock(tsd, tdata->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+		error = ckh_insert(tsd_tsdn(tsd), &tdata->bt2tctx, btkey,
+		    ret.v);
+		malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
 		if (error) {
 			if (new_gctx)
 				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-			idalloctm(tsd, ret.v, NULL, true, true);
+			idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
 			return (NULL);
 		}
-		malloc_mutex_lock(tsd, gctx->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
 		ret.p->state = prof_tctx_state_nominal;
 		tctx_tree_insert(&gctx->tctxs, ret.p);
 		gctx->nlimbo--;
-		malloc_mutex_unlock(tsd, gctx->lock);
+		malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
 	}
 
 	return (ret.p);
@@ -897,13 +898,13 @@ size_t
 prof_tdata_count(void)
 {
 	size_t tdata_count = 0;
-	tsd_t *tsd;
+	tsdn_t *tsdn;
 
-	tsd = tsd_fetch();
-	malloc_mutex_lock(tsd, &tdatas_mtx);
+	tsdn = tsdn_fetch();
+	malloc_mutex_lock(tsdn, &tdatas_mtx);
 	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
 	    (void *)&tdata_count);
-	malloc_mutex_unlock(tsd, &tdatas_mtx);
+	malloc_mutex_unlock(tsdn, &tdatas_mtx);
 
 	return (tdata_count);
 }
@@ -922,9 +923,9 @@ prof_bt_count(void)
 	if (tdata == NULL)
 		return (0);
 
-	malloc_mutex_lock(tsd, &bt2gctx_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
 	bt_count = ckh_count(&bt2gctx);
-	malloc_mutex_unlock(tsd, &bt2gctx_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
 
 	return (bt_count);
 }
@@ -1038,20 +1039,20 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
 }
 
 static void
-prof_tctx_merge_tdata(tsd_t *tsd, prof_tctx_t *tctx, prof_tdata_t *tdata)
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
 {
 
-	malloc_mutex_assert_owner(tsd, tctx->tdata->lock);
+	malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
 
-	malloc_mutex_lock(tsd, tctx->gctx->lock);
+	malloc_mutex_lock(tsdn, tctx->gctx->lock);
 
 	switch (tctx->state) {
 	case prof_tctx_state_initializing:
-		malloc_mutex_unlock(tsd, tctx->gctx->lock);
+		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
 		return;
 	case prof_tctx_state_nominal:
 		tctx->state = prof_tctx_state_dumping;
-		malloc_mutex_unlock(tsd, tctx->gctx->lock);
+		malloc_mutex_unlock(tsdn, tctx->gctx->lock);
 
 		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
@@ -1071,10 +1072,10 @@ prof_tctx_merge_tdata(tsd_t *tsd, prof_tctx_t *tctx, prof_tdata_t *tdata)
 }
 
 static void
-prof_tctx_merge_gctx(tsd_t *tsd, prof_tctx_t *tctx, prof_gctx_t *gctx)
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
 {
 
-	malloc_mutex_assert_owner(tsd, gctx->lock);
+	malloc_mutex_assert_owner(tsdn, gctx->lock);
 
 	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
 	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
@@ -1087,9 +1088,9 @@ static prof_tctx_t *
 prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 {
-	tsd_t *tsd = (tsd_t *)arg;
+	tsdn_t *tsdn = (tsdn_t *)arg;
 
-	malloc_mutex_assert_owner(tsd, tctx->gctx->lock);
+	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
 
 	switch (tctx->state) {
 	case prof_tctx_state_nominal:
@@ -1097,7 +1098,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 		break;
 	case prof_tctx_state_dumping:
 	case prof_tctx_state_purgatory:
-		prof_tctx_merge_gctx(tsd, tctx, tctx->gctx);
+		prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
 		break;
 	default:
 		not_reached();
@@ -1107,7 +1108,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 }
 
 struct prof_tctx_dump_iter_arg_s {
-	tsd_t	*tsd;
+	tsdn_t	*tsdn;
 	bool	propagate_err;
 };
 
@@ -1117,7 +1118,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
 	struct prof_tctx_dump_iter_arg_s *arg =
 	    (struct prof_tctx_dump_iter_arg_s *)opaque;
 
-	malloc_mutex_assert_owner(arg->tsd, tctx->gctx->lock);
+	malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
 
 	switch (tctx->state) {
 	case prof_tctx_state_initializing:
@@ -1142,10 +1143,10 @@ static prof_tctx_t *
 prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
 {
-	tsd_t *tsd = (tsd_t *)arg;
+	tsdn_t *tsdn = (tsdn_t *)arg;
 	prof_tctx_t *ret;
 
-	malloc_mutex_assert_owner(tsd, tctx->gctx->lock);
+	malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
 
 	switch (tctx->state) {
 	case prof_tctx_state_nominal:
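Several of the dump-path callbacks above thread the handle through a void * opaque argument, either directly (prof_tctx_merge_iter) or bundled into a small arg struct when more than one value is needed (prof_tctx_dump_iter_arg_s). A generic sketch of that callback-plumbing pattern, with hypothetical node and iterator names:

#include <stddef.h>

typedef struct tsdn_s tsdn_t;
typedef struct node_s {
	struct node_s *next;
	size_t count;
} node_t;

/* Hypothetical iterator: walks a list, stops when cb returns non-NULL. */
typedef node_t *(node_iter_cb_t)(node_t *node, void *opaque);

static node_t *
node_iter(node_t *head, node_iter_cb_t *cb, void *opaque)
{
	node_t *n;

	for (n = head; n != NULL; n = n->next) {
		if (cb(n, opaque) != NULL)
			return (n);
	}
	return (NULL);
}

/* Bundle the handle with other state, as prof_tctx_dump_iter_arg_s does. */
struct count_iter_arg_s {
	tsdn_t *tsdn;	/* passed through untouched, for locking/asserts */
	size_t total;
};

static node_t *
count_iter(node_t *node, void *opaque)
{
	struct count_iter_arg_s *arg = (struct count_iter_arg_s *)opaque;

	arg->total += node->count;
	return (NULL);	/* keep iterating */
}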
@@ -1167,12 +1168,12 @@ label_return:
 }
 
 static void
-prof_dump_gctx_prep(tsd_t *tsd, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
 {
 
 	cassert(config_prof);
 
-	malloc_mutex_lock(tsd, gctx->lock);
+	malloc_mutex_lock(tsdn, gctx->lock);
 
 	/*
 	 * Increment nlimbo so that gctx won't go away before dump.
@@ -1184,11 +1185,11 @@ prof_dump_gctx_prep(tsd_t *tsd, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
 
 	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
 
-	malloc_mutex_unlock(tsd, gctx->lock);
+	malloc_mutex_unlock(tsdn, gctx->lock);
 }
 
 struct prof_gctx_merge_iter_arg_s {
-	tsd_t	*tsd;
+	tsdn_t	*tsdn;
 	size_t	leak_ngctx;
 };
 
@@ -1198,12 +1199,12 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
 	struct prof_gctx_merge_iter_arg_s *arg =
 	    (struct prof_gctx_merge_iter_arg_s *)opaque;
 
-	malloc_mutex_lock(arg->tsd, gctx->lock);
+	malloc_mutex_lock(arg->tsdn, gctx->lock);
 	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
-	    (void *)arg->tsd);
+	    (void *)arg->tsdn);
 	if (gctx->cnt_summed.curobjs != 0)
 		arg->leak_ngctx++;
-	malloc_mutex_unlock(arg->tsd, gctx->lock);
+	malloc_mutex_unlock(arg->tsdn, gctx->lock);
 
 	return (NULL);
 }
@@ -1222,7 +1223,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
 	 */
 	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
 		gctx_tree_remove(gctxs, gctx);
-		malloc_mutex_lock(tsd, gctx->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
 		{
 			prof_tctx_t *next;
 
@@ -1230,14 +1231,15 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
 			do {
 				prof_tctx_t *to_destroy =
 				    tctx_tree_iter(&gctx->tctxs, next,
-				    prof_tctx_finish_iter, (void *)tsd);
+				    prof_tctx_finish_iter,
+				    (void *)tsd_tsdn(tsd));
 				if (to_destroy != NULL) {
 					next = tctx_tree_next(&gctx->tctxs,
 					    to_destroy);
 					tctx_tree_remove(&gctx->tctxs,
 					    to_destroy);
-					idalloctm(tsd, to_destroy, NULL, true,
-					    true);
+					idalloctm(tsd_tsdn(tsd), to_destroy,
+					    NULL, true, true);
 				} else
 					next = NULL;
 			} while (next != NULL);
@@ -1245,15 +1247,15 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
 		gctx->nlimbo--;
 		if (prof_gctx_should_destroy(gctx)) {
 			gctx->nlimbo++;
-			malloc_mutex_unlock(tsd, gctx->lock);
+			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
 			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
 		} else
-			malloc_mutex_unlock(tsd, gctx->lock);
+			malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
 	}
 }
 
 struct prof_tdata_merge_iter_arg_s {
-	tsd_t		*tsd;
+	tsdn_t		*tsdn;
 	prof_cnt_t	cnt_all;
 };
 
@@ -1264,7 +1266,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
 	struct prof_tdata_merge_iter_arg_s *arg =
 	    (struct prof_tdata_merge_iter_arg_s *)opaque;
 
-	malloc_mutex_lock(arg->tsd, tdata->lock);
+	malloc_mutex_lock(arg->tsdn, tdata->lock);
 	if (!tdata->expired) {
 		size_t tabind;
 		union {
@@ -1276,7 +1278,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
 		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
 		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
 		    &tctx.v);)
-			prof_tctx_merge_tdata(arg->tsd, tctx.p, tdata);
+			prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
 
 		arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
 		arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
@@ -1286,7 +1288,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
 		}
 	} else
 		tdata->dumping = false;
-	malloc_mutex_unlock(arg->tsd, tdata->lock);
+	malloc_mutex_unlock(arg->tsdn, tdata->lock);
 
 	return (NULL);
 }
@@ -1315,7 +1317,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
 #define	prof_dump_header JEMALLOC_N(prof_dump_header_impl)
 #endif
 static bool
-prof_dump_header(tsd_t *tsd, bool propagate_err, const prof_cnt_t *cnt_all)
+prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
 {
 	bool ret;
 
@@ -1326,10 +1328,10 @@ prof_dump_header(tsd_t *tsd, bool propagate_err, const prof_cnt_t *cnt_all)
 	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
 		return (true);
 
-	malloc_mutex_lock(tsd, &tdatas_mtx);
+	malloc_mutex_lock(tsdn, &tdatas_mtx);
 	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
 	    (void *)&propagate_err) != NULL);
-	malloc_mutex_unlock(tsd, &tdatas_mtx);
+	malloc_mutex_unlock(tsdn, &tdatas_mtx);
 	return (ret);
 }
 #ifdef JEMALLOC_JET
@@ -1339,7 +1341,7 @@ prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
 #endif
 
 static bool
-prof_dump_gctx(tsd_t *tsd, bool propagate_err, prof_gctx_t *gctx,
+prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
     const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
 {
 	bool ret;
@@ -1347,7 +1349,7 @@ prof_dump_gctx(tsd_t *tsd, bool propagate_err, prof_gctx_t *gctx,
 	struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
 
 	cassert(config_prof);
-	malloc_mutex_assert_owner(tsd, gctx->lock);
+	malloc_mutex_assert_owner(tsdn, gctx->lock);
 
 	/* Avoid dumping such gctx's that have no useful data. */
 	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
@@ -1381,7 +1383,7 @@ prof_dump_gctx(tsd_t *tsd, bool propagate_err, prof_gctx_t *gctx,
 			goto label_return;
 	}
 
-	prof_tctx_dump_iter_arg.tsd = tsd;
+	prof_tctx_dump_iter_arg.tsdn = tsdn;
 	prof_tctx_dump_iter_arg.propagate_err = propagate_err;
 	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
 	    (void *)&prof_tctx_dump_iter_arg) != NULL) {
@@ -1515,7 +1517,7 @@ prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
 }
 
 struct prof_gctx_dump_iter_arg_s {
-	tsd_t	*tsd;
+	tsdn_t	*tsdn;
 	bool	propagate_err;
 };
 
@@ -1526,9 +1528,9 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
 	struct prof_gctx_dump_iter_arg_s *arg =
 	    (struct prof_gctx_dump_iter_arg_s *)opaque;
 
-	malloc_mutex_lock(arg->tsd, gctx->lock);
+	malloc_mutex_lock(arg->tsdn, gctx->lock);
 
-	if (prof_dump_gctx(arg->tsd, arg->propagate_err, gctx, &gctx->bt,
+	if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
 	    gctxs)) {
 		ret = gctx;
 		goto label_return;
@@ -1536,7 +1538,7 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
 	ret = NULL;
 label_return:
-	malloc_mutex_unlock(arg->tsd, gctx->lock);
+	malloc_mutex_unlock(arg->tsdn, gctx->lock);
 	return (ret);
 }
 
@@ -1560,7 +1562,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 	if (tdata == NULL)
 		return (true);
 
-	malloc_mutex_lock(tsd, &prof_dump_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
 	prof_enter(tsd, tdata);
 
 	/*
@@ -1569,21 +1571,21 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 	 */
 	gctx_tree_new(&gctxs);
 	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
-		prof_dump_gctx_prep(tsd, gctx.p, &gctxs);
+		prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
 
 	/*
 	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
 	 * stats and merge them into the associated gctx's.
 	 */
-	prof_tdata_merge_iter_arg.tsd = tsd;
+	prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
 	memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
-	malloc_mutex_lock(tsd, &tdatas_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
 	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
 	    (void *)&prof_tdata_merge_iter_arg);
-	malloc_mutex_unlock(tsd, &tdatas_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
 
 	/* Merge tctx stats into gctx's. */
-	prof_gctx_merge_iter_arg.tsd = tsd;
+	prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
 	prof_gctx_merge_iter_arg.leak_ngctx = 0;
 	gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
 	    (void *)&prof_gctx_merge_iter_arg);
@@ -1595,12 +1597,12 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 		goto label_open_close_error;
 
 	/* Dump profile header. */
-	if (prof_dump_header(tsd, propagate_err,
+	if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
 	    &prof_tdata_merge_iter_arg.cnt_all))
 		goto label_write_error;
 
 	/* Dump per gctx profile stats. */
-	prof_gctx_dump_iter_arg.tsd = tsd;
+	prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
 	prof_gctx_dump_iter_arg.propagate_err = propagate_err;
 	if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
 	    (void *)&prof_gctx_dump_iter_arg) != NULL)
@@ -1614,7 +1616,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
 		goto label_open_close_error;
 
 	prof_gctx_finish(tsd, &gctxs);
-	malloc_mutex_unlock(tsd, &prof_dump_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
 
 	if (leakcheck) {
 		prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
@@ -1625,7 +1627,7 @@ label_write_error:
 	prof_dump_close(propagate_err);
label_open_close_error:
 	prof_gctx_finish(tsd, &gctxs);
-	malloc_mutex_unlock(tsd, &prof_dump_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
 	return (true);
 }
 
@@ -1665,21 +1667,23 @@ prof_fdump(void)
 		return;
 	tsd = tsd_fetch();
 
-	malloc_mutex_lock(tsd, &prof_dump_seq_mtx);
+	malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
 	prof_dump_filename(filename, 'f', VSEQ_INVALID);
-	malloc_mutex_unlock(tsd, &prof_dump_seq_mtx);
+	malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
 	prof_dump(tsd, false, filename, opt_prof_leak);
 }
 
 void
-prof_idump(tsd_t *tsd)
+prof_idump(tsdn_t *tsdn)
 {
+	tsd_t *tsd;
 	prof_tdata_t *tdata;
 
 	cassert(config_prof);
 
-	if (!prof_booted || tsd == NULL)
+	if (!prof_booted || tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 	tdata = prof_tdata_get(tsd, false);
 	if (tdata == NULL)
 		return;
@@ -1690,10 +1694,10 @@ prof_idump(tsd_t *tsd)
 	if (opt_prof_prefix[0] != '\0') {
 		char filename[PATH_MAX + 1];
 
-		malloc_mutex_lock(tsd, &prof_dump_seq_mtx);
+		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
 		prof_dump_filename(filename, 'i', prof_dump_iseq);
 		prof_dump_iseq++;
-		malloc_mutex_unlock(tsd, &prof_dump_seq_mtx);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
 		prof_dump(tsd, false, filename, false);
 	}
 }
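prof_idump() above (and prof_gdump() below) now accept a nullable handle and bail out or convert explicitly, which is what allows them to be reached before tsd bootstrapping completes. The shape of that probe-and-convert idiom, reduced to its essentials — the booted flag and helper names here are stand-ins, not jemalloc's exact declarations:

#include <stdbool.h>
#include <stddef.h>

typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;

extern bool subsystem_booted;		/* stand-in for prof_booted */
extern bool tsdn_null(const tsdn_t *tsdn);
extern tsd_t *tsdn_tsd(tsdn_t *tsdn);	/* asserting narrow conversion */
extern void do_work(tsd_t *tsd);

void
maybe_do_work(tsdn_t *tsdn)
{
	tsd_t *tsd;

	/*
	 * Callers may legitimately pass NULL during early bootstrap;
	 * refuse quietly rather than dereferencing.
	 */
	if (!subsystem_booted || tsdn_null(tsdn))
		return;
	tsd = tsdn_tsd(tsdn);	/* safe: proven non-NULL above */

	do_work(tsd);
}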
@@ -1712,24 +1716,26 @@ prof_mdump(tsd_t *tsd, const char *filename)
 		/* No filename specified, so automatically generate one. */
 		if (opt_prof_prefix[0] == '\0')
 			return (true);
-		malloc_mutex_lock(tsd, &prof_dump_seq_mtx);
+		malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
 		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
 		prof_dump_mseq++;
-		malloc_mutex_unlock(tsd, &prof_dump_seq_mtx);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
 		filename = filename_buf;
 	}
 	return (prof_dump(tsd, true, filename, false));
 }
 
 void
-prof_gdump(tsd_t *tsd)
+prof_gdump(tsdn_t *tsdn)
 {
+	tsd_t *tsd;
 	prof_tdata_t *tdata;
 
 	cassert(config_prof);
 
-	if (!prof_booted || tsd == NULL)
+	if (!prof_booted || tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 	tdata = prof_tdata_get(tsd, false);
 	if (tdata == NULL)
 		return;
@@ -1740,10 +1746,10 @@ prof_gdump(tsd_t *tsd)
 	if (opt_prof_prefix[0] != '\0') {
 		char filename[DUMP_FILENAME_BUFSIZE];
 
-		malloc_mutex_lock(tsd, &prof_dump_seq_mtx);
+		malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
 		prof_dump_filename(filename, 'u', prof_dump_useq);
 		prof_dump_useq++;
-		malloc_mutex_unlock(tsd, &prof_dump_seq_mtx);
+		malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
 		prof_dump(tsd, false, filename, false);
 	}
 }
@@ -1772,20 +1778,20 @@ prof_bt_keycomp(const void *k1, const void *k2)
 }
 
 JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(tsd_t *tsd)
+prof_thr_uid_alloc(tsdn_t *tsdn)
 {
 	uint64_t thr_uid;
 
-	malloc_mutex_lock(tsd, &next_thr_uid_mtx);
+	malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
 	thr_uid = next_thr_uid;
 	next_thr_uid++;
-	malloc_mutex_unlock(tsd, &next_thr_uid_mtx);
+	malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
 
 	return (thr_uid);
 }
 
 static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim,
     char *thread_name, bool active)
 {
 	prof_tdata_t *tdata;
@@ -1793,9 +1799,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
 	cassert(config_prof);
 
 	/* Initialize an empty cache for this thread. */
-	tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
-	    size2index(sizeof(prof_tdata_t)), false, NULL, true, arena_get(NULL,
-	    0, true), true);
+	tdata = (prof_tdata_t *)iallocztm(tsdn, sizeof(prof_tdata_t),
+	    size2index(sizeof(prof_tdata_t)), false, NULL, true,
+	    arena_get(TSDN_NULL, 0, true), true);
 	if (tdata == NULL)
 		return (NULL);
 
@@ -1807,9 +1813,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
 	tdata->expired = false;
 	tdata->tctx_uid_next = 0;
 
-	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
+	if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS,
 	    prof_bt_hash, prof_bt_keycomp)) {
-		idalloctm(tsd, tdata, NULL, true, true);
+		idalloctm(tsdn, tdata, NULL, true, true);
 		return (NULL);
 	}
 
@@ -1823,24 +1829,23 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
 	tdata->dumping = false;
 	tdata->active = active;
 
-	malloc_mutex_lock(tsd, &tdatas_mtx);
+	malloc_mutex_lock(tsdn, &tdatas_mtx);
 	tdata_tree_insert(&tdatas, tdata);
-	malloc_mutex_unlock(tsd, &tdatas_mtx);
+	malloc_mutex_unlock(tsdn, &tdatas_mtx);
 
 	return (tdata);
 }
 
 prof_tdata_t *
-prof_tdata_init(tsd_t *tsd)
+prof_tdata_init(tsdn_t *tsdn)
 {
 
-	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd), 0, NULL,
-	    prof_thread_active_init_get(tsd)));
+	return (prof_tdata_init_impl(tsdn, prof_thr_uid_alloc(tsdn), 0, NULL,
+	    prof_thread_active_init_get(tsdn)));
 }
 
 static bool
-prof_tdata_should_destroy_unlocked(tsd_t *tsd, prof_tdata_t *tdata,
-    bool even_if_attached)
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
 {
 
 	if (tdata->attached && !even_if_attached)
@@ -1851,43 +1856,41 @@ prof_tdata_should_destroy_unlocked(tsd_t *tsd, prof_tdata_t *tdata,
 }
 
 static bool
-prof_tdata_should_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
     bool even_if_attached)
 {
 
-	malloc_mutex_assert_owner(tsd, tdata->lock);
+	malloc_mutex_assert_owner(tsdn, tdata->lock);
 
-	return (prof_tdata_should_destroy_unlocked(tsd, tdata,
-	    even_if_attached));
+	return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
 }
 
 static void
-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata,
     bool even_if_attached)
 {
 
-	malloc_mutex_assert_owner(tsd, &tdatas_mtx);
+	malloc_mutex_assert_owner(tsdn, &tdatas_mtx);
 
-	assert(tsd_prof_tdata_get(tsd) != tdata);
+	assert(tsdn_null(tsdn) || tsd_prof_tdata_get(tsdn_tsd(tsdn)) != tdata);
 
 	tdata_tree_remove(&tdatas, tdata);
 
-	assert(prof_tdata_should_destroy_unlocked(tsd, tdata,
-	    even_if_attached));
+	assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
 
 	if (tdata->thread_name != NULL)
-		idalloctm(tsd, tdata->thread_name, NULL, true, true);
-	ckh_delete(tsd, &tdata->bt2tctx);
-	idalloctm(tsd, tdata, NULL, true, true);
+		idalloctm(tsdn, tdata->thread_name, NULL, true, true);
+	ckh_delete(tsdn, &tdata->bt2tctx);
+	idalloctm(tsdn, tdata, NULL, true, true);
 }
 
 static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
+prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached)
 {
 
-	malloc_mutex_lock(tsd, &tdatas_mtx);
-	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
-	malloc_mutex_unlock(tsd, &tdatas_mtx);
+	malloc_mutex_lock(tsdn, &tdatas_mtx);
+	prof_tdata_destroy_locked(tsdn, tdata, even_if_attached);
+	malloc_mutex_unlock(tsdn, &tdatas_mtx);
 }
 
 static void
@@ -1895,9 +1898,10 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
 {
 	bool destroy_tdata;
 
-	malloc_mutex_lock(tsd, tdata->lock);
+	malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
 	if (tdata->attached) {
-		destroy_tdata = prof_tdata_should_destroy(tsd, tdata, true);
+		destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+		    true);
 		/*
 		 * Only detach if !destroy_tdata, because detaching would allow
 		 * another thread to win the race to destroy tdata.
@@ -1907,9 +1911,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
 		tsd_prof_tdata_set(tsd, NULL);
 	} else
 		destroy_tdata = false;
-	malloc_mutex_unlock(tsd, tdata->lock);
+	malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
 	if (destroy_tdata)
-		prof_tdata_destroy(tsd, tdata, true);
+		prof_tdata_destroy(tsd_tsdn(tsd), tdata, true);
 }
 
 prof_tdata_t *
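prof_tdata_detach() above computes the destroy decision while holding tdata->lock but performs the destruction only after dropping it. A stripped-down sketch of that check-under-lock, act-outside-lock shape; the mutex API is reduced to pthreads and the destroy condition is illustrative, not jemalloc's actual logic:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;
	bool attached;
	/* ...counts that decide destroyability... */
} tdata_t;

static bool
tdata_should_destroy(tdata_t *tdata)	/* caller holds tdata->lock */
{
	return (!tdata->attached);	/* illustrative condition only */
}

static void
tdata_detach(tdata_t *tdata)
{
	bool destroy;

	pthread_mutex_lock(&tdata->lock);
	if (tdata->attached) {
		tdata->attached = false;
		/* Decide while the state is stable under the lock... */
		destroy = tdata_should_destroy(tdata);
	} else
		destroy = false;
	pthread_mutex_unlock(&tdata->lock);

	/* ...but tear down only after the lock has been dropped. */
	if (destroy)
		free(tdata);
}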
@@ -1918,27 +1922,27 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
 	uint64_t thr_uid = tdata->thr_uid;
 	uint64_t thr_discrim = tdata->thr_discrim + 1;
 	char *thread_name = (tdata->thread_name != NULL) ?
-	    prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
+	    prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
 	bool active = tdata->active;
 
 	prof_tdata_detach(tsd, tdata);
-	return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
-	    active));
+	return (prof_tdata_init_impl(tsd_tsdn(tsd), thr_uid, thr_discrim,
+	    thread_name, active));
 }
 
 static bool
-prof_tdata_expire(tsd_t *tsd, prof_tdata_t *tdata)
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
 {
 	bool destroy_tdata;
 
-	malloc_mutex_lock(tsd, tdata->lock);
+	malloc_mutex_lock(tsdn, tdata->lock);
 	if (!tdata->expired) {
 		tdata->expired = true;
 		destroy_tdata = tdata->attached ? false :
-		    prof_tdata_should_destroy(tsd, tdata, false);
+		    prof_tdata_should_destroy(tsdn, tdata, false);
 	} else
 		destroy_tdata = false;
-	malloc_mutex_unlock(tsd, tdata->lock);
+	malloc_mutex_unlock(tsdn, tdata->lock);
 
 	return (destroy_tdata);
 }
@@ -1946,36 +1950,36 @@ static prof_tdata_t *
 prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
     void *arg)
 {
-	tsd_t *tsd = (tsd_t *)arg;
+	tsdn_t *tsdn = (tsdn_t *)arg;
 
-	return (prof_tdata_expire(tsd, tdata) ? tdata : NULL);
+	return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
 }
 
 void
-prof_reset(tsd_t *tsd, size_t lg_sample)
+prof_reset(tsdn_t *tsdn, size_t lg_sample)
 {
 	prof_tdata_t *next;
 
 	assert(lg_sample < (sizeof(uint64_t) << 3));
 
-	malloc_mutex_lock(tsd, &prof_dump_mtx);
-	malloc_mutex_lock(tsd, &tdatas_mtx);
+	malloc_mutex_lock(tsdn, &prof_dump_mtx);
+	malloc_mutex_lock(tsdn, &tdatas_mtx);
 
 	lg_prof_sample = lg_sample;
 
 	next = NULL;
 	do {
 		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
-		    prof_tdata_reset_iter, (void *)tsd);
+		    prof_tdata_reset_iter, (void *)tsdn);
 		if (to_destroy != NULL) {
 			next = tdata_tree_next(&tdatas, to_destroy);
-			prof_tdata_destroy_locked(tsd, to_destroy, false);
+			prof_tdata_destroy_locked(tsdn, to_destroy, false);
 		} else
 			next = NULL;
 	} while (next != NULL);
 
-	malloc_mutex_unlock(tsd, &tdatas_mtx);
-	malloc_mutex_unlock(tsd, &prof_dump_mtx);
+	malloc_mutex_unlock(tsdn, &tdatas_mtx);
+	malloc_mutex_unlock(tsdn, &prof_dump_mtx);
 }
 
 void
@@ -1992,25 +1996,25 @@ prof_tdata_cleanup(tsd_t *tsd)
 }
 
 bool
-prof_active_get(tsd_t *tsd)
+prof_active_get(tsdn_t *tsdn)
 {
 	bool prof_active_current;
 
-	malloc_mutex_lock(tsd, &prof_active_mtx);
+	malloc_mutex_lock(tsdn, &prof_active_mtx);
 	prof_active_current = prof_active;
-	malloc_mutex_unlock(tsd, &prof_active_mtx);
+	malloc_mutex_unlock(tsdn, &prof_active_mtx);
 	return (prof_active_current);
 }
 
 bool
-prof_active_set(tsd_t *tsd, bool active)
+prof_active_set(tsdn_t *tsdn, bool active)
 {
 	bool prof_active_old;
 
-	malloc_mutex_lock(tsd, &prof_active_mtx);
+	malloc_mutex_lock(tsdn, &prof_active_mtx);
 	prof_active_old = prof_active;
 	prof_active = active;
-	malloc_mutex_unlock(tsd, &prof_active_mtx);
+	malloc_mutex_unlock(tsdn, &prof_active_mtx);
 	return (prof_active_old);
 }
 
@@ -2026,7 +2030,7 @@ prof_thread_name_get(tsd_t *tsd)
 }
 
 static char *
-prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
 {
 	char *ret;
 	size_t size;
@@ -2038,8 +2042,8 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
 	if (size == 1)
 		return ("");
 
-	ret = iallocztm(tsd, size, size2index(size), false, NULL, true,
-	    arena_get(NULL, 0, true), true);
+	ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
+	    arena_get(TSDN_NULL, 0, true), true);
 	if (ret == NULL)
 		return (NULL);
 	memcpy(ret, thread_name, size);
@@ -2066,12 +2070,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
 		return (EFAULT);
 	}
 
-	s = prof_thread_name_alloc(tsd, thread_name);
+	s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
 	if (s == NULL)
 		return (EAGAIN);
 
 	if (tdata->thread_name != NULL) {
-		idalloctm(tsd, tdata->thread_name, NULL, true, true);
+		idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
 		tdata->thread_name = NULL;
 	}
 	if (strlen(s) > 0)
@@ -2103,48 +2107,48 @@ prof_thread_active_set(tsd_t *tsd, bool active)
 }
 
 bool
-prof_thread_active_init_get(tsd_t *tsd)
+prof_thread_active_init_get(tsdn_t *tsdn)
 {
 	bool active_init;
 
-	malloc_mutex_lock(tsd, &prof_thread_active_init_mtx);
+	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
 	active_init = prof_thread_active_init;
-	malloc_mutex_unlock(tsd, &prof_thread_active_init_mtx);
+	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
 	return (active_init);
 }
 
 bool
-prof_thread_active_init_set(tsd_t *tsd, bool active_init)
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
 {
 	bool active_init_old;
 
-	malloc_mutex_lock(tsd, &prof_thread_active_init_mtx);
+	malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
 	active_init_old = prof_thread_active_init;
 	prof_thread_active_init = active_init;
-	malloc_mutex_unlock(tsd, &prof_thread_active_init_mtx);
+	malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
 	return (active_init_old);
 }
 
 bool
-prof_gdump_get(tsd_t *tsd)
+prof_gdump_get(tsdn_t *tsdn)
 {
 	bool prof_gdump_current;
 
-	malloc_mutex_lock(tsd, &prof_gdump_mtx);
+	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
 	prof_gdump_current = prof_gdump_val;
-	malloc_mutex_unlock(tsd, &prof_gdump_mtx);
+	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
 	return (prof_gdump_current);
 }
 
 bool
-prof_gdump_set(tsd_t *tsd, bool gdump)
+prof_gdump_set(tsdn_t *tsdn, bool gdump)
 {
 	bool prof_gdump_old;
 
-	malloc_mutex_lock(tsd, &prof_gdump_mtx);
+	malloc_mutex_lock(tsdn, &prof_gdump_mtx);
 	prof_gdump_old = prof_gdump_val;
 	prof_gdump_val = gdump;
-	malloc_mutex_unlock(tsd, &prof_gdump_mtx);
+	malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
 	return (prof_gdump_old);
 }
 
@@ -2185,7 +2189,7 @@ prof_boot1(void)
 }
 
 bool
-prof_boot2(tsd_t *tsd)
+prof_boot2(tsdn_t *tsdn)
 {
 
 	cassert(config_prof);
@@ -2211,7 +2215,7 @@ prof_boot2(tsdn_t *tsdn)
 		    WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
 			return (true);
 
-		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+		if (ckh_new(tsdn, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
 		    prof_bt_keycomp))
 			return (true);
 		if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
@@ -2242,8 +2246,8 @@ prof_boot2(tsdn_t *tsdn)
 			abort();
 		}
 
-		gctx_locks = (malloc_mutex_t *)base_alloc(tsd, PROF_NCTX_LOCKS *
-		    sizeof(malloc_mutex_t));
+		gctx_locks = (malloc_mutex_t *)base_alloc(tsdn, PROF_NCTX_LOCKS
+		    * sizeof(malloc_mutex_t));
 		if (gctx_locks == NULL)
 			return (true);
 		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
@@ -2252,7 +2256,7 @@ prof_boot2(tsdn_t *tsdn)
 				return (true);
 		}
 
-		tdata_locks = (malloc_mutex_t *)base_alloc(tsd,
+		tdata_locks = (malloc_mutex_t *)base_alloc(tsdn,
 		    PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
 		if (tdata_locks == NULL)
 			return (true);
@@ -2277,76 +2281,77 @@ prof_boot2(tsdn_t *tsdn)
 }
 
 void
-prof_prefork0(tsd_t *tsd)
+prof_prefork0(tsdn_t *tsdn)
 {
 
 	if (opt_prof) {
 		unsigned i;
 
-		malloc_mutex_prefork(tsd, &prof_dump_mtx);
-		malloc_mutex_prefork(tsd, &bt2gctx_mtx);
-		malloc_mutex_prefork(tsd, &tdatas_mtx);
+		malloc_mutex_prefork(tsdn, &prof_dump_mtx);
+		malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
+		malloc_mutex_prefork(tsdn, &tdatas_mtx);
 		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
-			malloc_mutex_prefork(tsd, &tdata_locks[i]);
+			malloc_mutex_prefork(tsdn, &tdata_locks[i]);
 		for (i = 0; i < PROF_NCTX_LOCKS; i++)
-			malloc_mutex_prefork(tsd, &gctx_locks[i]);
+			malloc_mutex_prefork(tsdn, &gctx_locks[i]);
 	}
 }
 
 void
-prof_prefork1(tsd_t *tsd)
+prof_prefork1(tsdn_t *tsdn)
 {
 
 	if (opt_prof) {
-		malloc_mutex_prefork(tsd, &prof_active_mtx);
-		malloc_mutex_prefork(tsd, &prof_dump_seq_mtx);
-		malloc_mutex_prefork(tsd, &prof_gdump_mtx);
-		malloc_mutex_prefork(tsd, &next_thr_uid_mtx);
-		malloc_mutex_prefork(tsd, &prof_thread_active_init_mtx);
+		malloc_mutex_prefork(tsdn, &prof_active_mtx);
+		malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+		malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+		malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
+		malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
 	}
 }
 
 void
-prof_postfork_parent(tsd_t *tsd)
+prof_postfork_parent(tsdn_t *tsdn)
 {
 
 	if (opt_prof) {
 		unsigned i;
 
-		malloc_mutex_postfork_parent(tsd, &prof_thread_active_init_mtx);
-		malloc_mutex_postfork_parent(tsd, &next_thr_uid_mtx);
-		malloc_mutex_postfork_parent(tsd, &prof_gdump_mtx);
-		malloc_mutex_postfork_parent(tsd, &prof_dump_seq_mtx);
-		malloc_mutex_postfork_parent(tsd, &prof_active_mtx);
+		malloc_mutex_postfork_parent(tsdn,
+		    &prof_thread_active_init_mtx);
+		malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+		malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
+		malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+		malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
 		for (i = 0; i < PROF_NCTX_LOCKS; i++)
-			malloc_mutex_postfork_parent(tsd, &gctx_locks[i]);
+			malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
 		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
-			malloc_mutex_postfork_parent(tsd, &tdata_locks[i]);
-		malloc_mutex_postfork_parent(tsd, &tdatas_mtx);
-		malloc_mutex_postfork_parent(tsd, &bt2gctx_mtx);
-		malloc_mutex_postfork_parent(tsd, &prof_dump_mtx);
+			malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+		malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
+		malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
+		malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
 	}
 }
 
 void
-prof_postfork_child(tsd_t *tsd)
+prof_postfork_child(tsdn_t *tsdn)
 {
 
 	if (opt_prof) {
 		unsigned i;
 
-		malloc_mutex_postfork_child(tsd, &prof_thread_active_init_mtx);
-		malloc_mutex_postfork_child(tsd, &next_thr_uid_mtx);
-		malloc_mutex_postfork_child(tsd, &prof_gdump_mtx);
-		malloc_mutex_postfork_child(tsd, &prof_dump_seq_mtx);
-		malloc_mutex_postfork_child(tsd, &prof_active_mtx);
+		malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
+		malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+		malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
+		malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+		malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
 		for (i = 0; i < PROF_NCTX_LOCKS; i++)
-			malloc_mutex_postfork_child(tsd, &gctx_locks[i]);
+			malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
 		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
-			malloc_mutex_postfork_child(tsd, &tdata_locks[i]);
-		malloc_mutex_postfork_child(tsd, &tdatas_mtx);
-		malloc_mutex_postfork_child(tsd, &bt2gctx_mtx);
-		malloc_mutex_postfork_child(tsd, &prof_dump_mtx);
+			malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+		malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
+		malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
+		malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
 	}
 }
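The prefork/postfork trio above exists so that fork() never inherits a locked allocator mutex into the child: every lock is acquired in rank order before the fork, then released in the parent and released or reinitialized in the child. A reduced sketch of how such hooks are commonly wired up — jemalloc registers its own equivalents during bootstrap, so the pthread_atfork() call here is purely illustrative:

#include <pthread.h>

static pthread_mutex_t mtx_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mtx_b = PTHREAD_MUTEX_INITIALIZER;

/* Acquire in rank order so no other thread holds a lock across fork(). */
static void
prefork(void)
{
	pthread_mutex_lock(&mtx_a);
	pthread_mutex_lock(&mtx_b);
}

/* Parent: simply release, in reverse order. */
static void
postfork_parent(void)
{
	pthread_mutex_unlock(&mtx_b);
	pthread_mutex_unlock(&mtx_a);
}

/*
 * Child: the forking thread survives and owns the locks, so unlocking
 * works here too; jemalloc reinitializes instead when unlock is unsafe
 * (the JEMALLOC_MUTEX_INIT_CB branch in mutex.c above).
 */
static void
postfork_child(void)
{
	pthread_mutex_unlock(&mtx_b);
	pthread_mutex_unlock(&mtx_a);
}

static void
install_fork_hooks(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);
}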
diff --git a/src/quarantine.c b/src/quarantine.c
index ff1637ec..18903fb5 100644
--- a/src/quarantine.c
+++ b/src/quarantine.c
@@ -13,24 +13,22 @@
 /* Function prototypes for non-inline static functions. */
 
 static quarantine_t	*quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
-static void	quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
-static void	quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
+static void	quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
+static void	quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
     size_t upper_bound);
 
 /******************************************************************************/
 
 static quarantine_t *
-quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
+quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
 {
 	quarantine_t *quarantine;
 	size_t size;
 
-	assert(tsd_nominal(tsd));
-
 	size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
 	    sizeof(quarantine_obj_t));
-	quarantine = (quarantine_t *)iallocztm(tsd, size, size2index(size),
-	    false, NULL, true, arena_get(NULL, 0, true), true);
+	quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
+	    false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
 	if (quarantine == NULL)
 		return (NULL);
 	quarantine->curbytes = 0;
@@ -49,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
 	if (!tsd_nominal(tsd))
 		return;
 
-	quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
+	quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
 	/*
 	 * Check again whether quarantine has been initialized, because
 	 * quarantine_init() may have triggered recursive initialization.
@@ -57,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
 	if (tsd_quarantine_get(tsd) == NULL)
 		tsd_quarantine_set(tsd, quarantine);
 	else
-		idalloctm(tsd, quarantine, NULL, true, true);
+		idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
 }
 
 static quarantine_t *
@@ -65,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
 {
 	quarantine_t *ret;
 
-	ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
+	ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
 	if (ret == NULL) {
-		quarantine_drain_one(tsd, quarantine);
+		quarantine_drain_one(tsd_tsdn(tsd), quarantine);
 		return (quarantine);
 	}
 
@@ -89,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
 		memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
 		    sizeof(quarantine_obj_t));
 	}
-	idalloctm(tsd, quarantine, NULL, true, true);
+	idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
 
 	tsd_quarantine_set(tsd, ret);
 	return (ret);
 }
 
 static void
-quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
+quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
 {
 	quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
-	assert(obj->usize == isalloc(tsd, obj->ptr, config_prof));
-	idalloctm(tsd, obj->ptr, NULL, false, true);
+	assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
+	idalloctm(tsdn, obj->ptr, NULL, false, true);
 	quarantine->curbytes -= obj->usize;
 	quarantine->curobjs--;
 	quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -108,24 +106,24 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
 }
 
 static void
-quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
 {
 
 	while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
-		quarantine_drain_one(tsd, quarantine);
+		quarantine_drain_one(tsdn, quarantine);
 }
 
 void
 quarantine(tsd_t *tsd, void *ptr)
 {
 	quarantine_t *quarantine;
-	size_t usize = isalloc(tsd, ptr, config_prof);
+	size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
 
 	cassert(config_fill);
 	assert(opt_quarantine);
 
 	if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
-		idalloctm(tsd, ptr, NULL, false, true);
+		idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
 		return;
 	}
 	/*
@@ -135,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
 	if (quarantine->curbytes + usize > opt_quarantine) {
 		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
 		    - usize : 0;
-		quarantine_drain(tsd, quarantine, upper_bound);
+		quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
 	}
 	/* Grow the quarantine ring buffer if it's full. */
 	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@@ -164,7 +162,7 @@ quarantine(tsd_t *tsd, void *ptr)
 		}
 	} else {
 		assert(quarantine->curbytes == 0);
-		idalloctm(tsd, ptr, NULL, false, true);
+		idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
 	}
 }
 
@@ -178,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
 	quarantine = tsd_quarantine_get(tsd);
 	if (quarantine != NULL) {
-		quarantine_drain(tsd, quarantine, 0);
-		idalloctm(tsd, quarantine, NULL, true, true);
+		quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
+		idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
 		tsd_quarantine_set(tsd, NULL);
 	}
 }
diff --git a/src/tcache.c b/src/tcache.c
index 88005f30..175759c7 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -24,10 +24,10 @@ static tcaches_t	*tcaches_avail;
 
 /******************************************************************************/
 
 size_t
-tcache_salloc(tsd_t *tsd, const void *ptr)
+tcache_salloc(tsdn_t *tsdn, const void *ptr)
 {
 
-	return (arena_salloc(tsd, ptr, false));
+	return (arena_salloc(tsdn, ptr, false));
 }
 
 void
@@ -71,12 +71,12 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 }
 
 void *
-tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
     tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
 {
 	void *ret;
 
-	arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
+	arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
 	    tcache->prof_accumbytes : 0);
 	if (config_prof)
 		tcache->prof_accumbytes = 0;
@@ -107,13 +107,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		arena_bin_t *bin = &bin_arena->bins[binind];
 
 		if (config_prof && bin_arena == arena) {
-			if (arena_prof_accum(tsd, arena,
+			if (arena_prof_accum(tsd_tsdn(tsd), arena,
 			    tcache->prof_accumbytes))
-				prof_idump(tsd);
+				prof_idump(tsd_tsdn(tsd));
 			tcache->prof_accumbytes = 0;
 		}
 
-		malloc_mutex_lock(tsd, &bin->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 		if (config_stats && bin_arena == arena) {
 			assert(!merged_stats);
 			merged_stats = true;
@@ -131,8 +131,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
 				    arena_bitselm_get_mutable(chunk, pageind);
-				arena_dalloc_bin_junked_locked(tsd, bin_arena,
-				    chunk, ptr, bitselm);
+				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+				    bin_arena, chunk, ptr, bitselm);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -144,8 +144,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				ndeferred++;
 			}
 		}
-		malloc_mutex_unlock(tsd, &bin->lock);
-		arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
 		/*
@@ -153,11 +153,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
 		arena_bin_t *bin = &arena->bins[binind];
-		malloc_mutex_lock(tsd, &bin->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 		bin->stats.nflushes++;
 		bin->stats.nrequests += tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(tsd, &bin->lock);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 	}
 
 	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -190,7 +190,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		if (config_prof)
 			idump = false;
 
-		malloc_mutex_lock(tsd, &locked_arena->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
 		if ((config_prof || config_stats) && locked_arena == arena) {
 			if (config_prof) {
 				idump = arena_prof_accum_locked(arena,
@@ -213,7 +213,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 			if (extent_node_arena_get(&chunk->node) ==
 			    locked_arena) {
-				arena_dalloc_large_junked_locked(tsd,
+				arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
 				    locked_arena, chunk, ptr);
 			} else {
 				/*
@@ -226,22 +226,23 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 				ndeferred++;
 			}
 		}
-		malloc_mutex_unlock(tsd, &locked_arena->lock);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
 		if (config_prof && idump)
-			prof_idump(tsd);
-		arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
+			prof_idump(tsd_tsdn(tsd));
+		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+		    ndeferred);
 	}
 	if (config_stats && !merged_stats) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
-		malloc_mutex_lock(tsd, &arena->lock);
+		malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
 		arena->stats.lstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(tsd, &arena->lock);
+		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
 	}
 
 	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -251,35 +252,26 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 	tbin->low_water = tbin->ncached;
 }
 
-void
-tcache_arena_associate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
+static void
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 {
 
 	if (config_stats) {
 		/* Link into list of extant tcaches. */
-		malloc_mutex_lock(tsd, &arena->lock);
+		malloc_mutex_lock(tsdn, &arena->lock);
 		ql_elm_new(tcache, link);
 		ql_tail_insert(&arena->tcache_ql, tcache, link);
-		malloc_mutex_unlock(tsd, &arena->lock);
+		malloc_mutex_unlock(tsdn, &arena->lock);
 	}
 }
 
-void
-tcache_arena_reassociate(tsd_t *tsd, tcache_t *tcache, arena_t *oldarena,
-    arena_t *newarena)
-{
-
-	tcache_arena_dissociate(tsd, tcache, oldarena);
-	tcache_arena_associate(tsd, tcache, newarena);
-}
-
-void
-tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
+static void
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 {
 
 	if (config_stats) {
 		/* Unlink from list of extant tcaches. */
-		malloc_mutex_lock(tsd, &arena->lock);
+		malloc_mutex_lock(tsdn, &arena->lock);
 		if (config_debug) {
 			bool in_ql = false;
 			tcache_t *iter;
@@ -292,11 +284,20 @@ tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
 			assert(in_ql);
 		}
 		ql_remove(&arena->tcache_ql, tcache, link);
-		tcache_stats_merge(tsd, tcache, arena);
-		malloc_mutex_unlock(tsd, &arena->lock);
+		tcache_stats_merge(tsdn, tcache, arena);
+		malloc_mutex_unlock(tsdn, &arena->lock);
 	}
 }
 
+void
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
+    arena_t *newarena)
+{
+
+	tcache_arena_dissociate(tsdn, tcache, oldarena);
+	tcache_arena_associate(tsdn, tcache, newarena);
+}
+
 tcache_t *
 tcache_get_hard(tsd_t *tsd)
 {
@@ -310,11 +311,11 @@ tcache_get_hard(tsd_t *tsd)
 	arena = arena_choose(tsd, NULL);
 	if (unlikely(arena == NULL))
 		return (NULL);
-	return (tcache_create(tsd, arena));
+	return (tcache_create(tsd_tsdn(tsd), arena));
 }
 
 tcache_t *
-tcache_create(tsd_t *tsd, arena_t *arena)
+tcache_create(tsdn_t *tsdn, arena_t *arena)
 {
 	tcache_t *tcache;
 	size_t size, stack_offset;
@@ -328,12 +329,12 @@ tcache_create(tsd_t *tsd, arena_t *arena)
 	/* Avoid false cacheline sharing. */
 	size = sa2u(size, CACHELINE);
 
-	tcache = ipallocztm(tsd, size, CACHELINE, true, NULL, true,
-	    arena_get(NULL, 0, true));
+	tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
+	    arena_get(TSDN_NULL, 0, true));
 	if (tcache == NULL)
 		return (NULL);
 
-	tcache_arena_associate(tsd, tcache, arena);
+	tcache_arena_associate(tsdn, tcache, arena);
 
 	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
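tcache_create() above rounds the request up with sa2u() and forces CACHELINE alignment so that two threads' caches never share a line. The same idea in portable C11, with a hypothetical cache-line constant standing in for jemalloc's per-platform derivation:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define CACHELINE 64	/* assumption; real code derives this per platform */

/* Round sz up to a multiple of the cache line, then allocate aligned. */
static void *
cacheline_alloc(size_t sz)
{
	size_t rounded = (sz + CACHELINE - 1) & ~((size_t)CACHELINE - 1);
	void *p = aligned_alloc(CACHELINE, rounded);	/* size must be a
							 * multiple of the
							 * alignment */

	if (p != NULL)
		memset(p, 0, rounded);	/* zeroed, like ipallocztm(..., true, ...) */
	return (p);
}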
@@ -360,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 	unsigned i;
 
 	arena = arena_choose(tsd, NULL);
-	tcache_arena_dissociate(tsd, tcache, arena);
+	tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
 
 	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
@@ -368,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_bin_t *bin = &arena->bins[i];
-			malloc_mutex_lock(tsd, &bin->lock);
+			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
-			malloc_mutex_unlock(tsd, &bin->lock);
+			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
 		}
 	}
 
@@ -379,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
-			malloc_mutex_lock(tsd, &arena->lock);
+			malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
 			arena->stats.lstats[i - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
-			malloc_mutex_unlock(tsd, &arena->lock);
+			malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
 		}
 	}
 
 	if (config_prof && tcache->prof_accumbytes > 0 &&
-	    arena_prof_accum(tsd, arena, tcache->prof_accumbytes))
-		prof_idump(tsd);
+	    arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
+		prof_idump(tsd_tsdn(tsd));
 
-	idalloctm(tsd, tcache, NULL, true, true);
+	idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
 }
 
 void
@@ -416,21 +417,21 @@ tcache_enabled_cleanup(tsd_t *tsd)
 }
 
 void
-tcache_stats_merge(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 {
 	unsigned i;
 
 	cassert(config_stats);
 
-	malloc_mutex_assert_owner(tsd, &arena->lock);
+	malloc_mutex_assert_owner(tsdn, &arena->lock);
 
 	/* Merge and reset tcache stats. */
 	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		malloc_mutex_lock(tsd, &bin->lock);
+		malloc_mutex_lock(tsdn, &bin->lock);
 		bin->stats.nrequests += tbin->tstats.nrequests;
-		malloc_mutex_unlock(tsd, &bin->lock);
+		malloc_mutex_unlock(tsdn, &bin->lock);
 		tbin->tstats.nrequests = 0;
 	}
@@ -444,14 +445,14 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
 }
 
 bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind)
+tcaches_create(tsdn_t *tsdn, unsigned *r_ind)
 {
 	arena_t *arena;
 	tcache_t *tcache;
 	tcaches_t *elm;
 
 	if (tcaches == NULL) {
-		tcaches = base_alloc(tsd, sizeof(tcache_t *) *
+		tcaches = base_alloc(tsdn, sizeof(tcache_t *) *
 		    (MALLOCX_TCACHE_MAX+1));
 		if (tcaches == NULL)
 			return (true);
@@ -459,10 +460,10 @@ tcaches_create(tsdn_t *tsdn, unsigned *r_ind)
 	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
 		return (true);
 
-	arena = arena_ichoose(tsd, NULL);
+	arena = arena_ichoose(tsdn, NULL);
 	if (unlikely(arena == NULL))
 		return (true);
-	tcache = tcache_create(tsd, arena);
+	tcache = tcache_create(tsdn, arena);
 	if (tcache == NULL)
 		return (true);
@@ -508,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
 }
 
 bool
-tcache_boot(tsd_t *tsd)
+tcache_boot(tsdn_t *tsdn)
 {
 	unsigned i;
@@ -526,7 +527,7 @@ tcache_boot(tsdn_t *tsdn)
 	nhbins = size2index(tcache_maxclass) + 1;
 
 	/* Initialize tcache_bin_info. */
-	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsd, nhbins *
+	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
 	    sizeof(tcache_bin_info_t));
 	if (tcache_bin_info == NULL)
 		return (true);
diff --git a/src/witness.c b/src/witness.c
index 31c36a24..f5176b6f 100644
--- a/src/witness.c
+++ b/src/witness.c
@@ -34,17 +34,19 @@ witness_lock_error_t *witness_lock_error = JEMALLOC_N(witness_lock_error_impl);
 #endif
 
 void
-witness_lock(tsd_t *tsd, witness_t *witness)
+witness_lock(tsdn_t *tsdn, witness_t *witness)
 {
+	tsd_t *tsd;
 	witness_list_t *witnesses;
 	witness_t *w;
 
-	if (tsd == NULL)
+	if (tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 	if (witness->rank == WITNESS_RANK_OMIT)
 		return;
 
-	witness_assert_not_owner(tsd, witness);
+	witness_assert_not_owner(tsdn, witness);
 
 	witnesses = tsd_witnessesp_get(tsd);
 	w = ql_last(witnesses, link);
@@ -69,16 +71,18 @@ witness_lock(tsdn_t *tsdn, witness_t *witness)
 }
 
 void
-witness_unlock(tsd_t *tsd, witness_t *witness)
+witness_unlock(tsdn_t *tsdn, witness_t *witness)
 {
+	tsd_t *tsd;
 	witness_list_t *witnesses;
 
-	if (tsd == NULL)
+	if (tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 	if (witness->rank == WITNESS_RANK_OMIT)
 		return;
 
-	witness_assert_owner(tsd, witness);
+	witness_assert_owner(tsdn, witness);
 
 	witnesses = tsd_witnessesp_get(tsd);
 	ql_remove(witnesses, witness, link);
@@ -104,13 +108,15 @@ witness_owner_error_t *witness_owner_error =
 #endif
 
 void
-witness_assert_owner(tsd_t *tsd, const witness_t *witness)
+witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
 {
+	tsd_t *tsd;
 	witness_list_t *witnesses;
 	witness_t *w;
 
-	if (tsd == NULL)
+	if (tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 	if (witness->rank == WITNESS_RANK_OMIT)
 		return;
@@ -142,13 +148,15 @@ witness_not_owner_error_t *witness_not_owner_error =
 #endif
 
 void
-witness_assert_not_owner(tsd_t *tsd, const witness_t *witness)
+witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
 {
+	tsd_t *tsd;
 	witness_list_t *witnesses;
 	witness_t *w;
 
-	if (tsd == NULL)
+	if (tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 	if (witness->rank == WITNESS_RANK_OMIT)
 		return;
@@ -183,13 +191,15 @@ witness_lockless_error_t *witness_lockless_error =
 #endif
 
 void
-witness_assert_lockless(tsd_t *tsd)
+witness_assert_lockless(tsdn_t *tsdn)
 {
+	tsd_t *tsd;
 	witness_list_t *witnesses;
 	witness_t *w;
 
-	if (tsd == NULL)
+	if (tsdn_null(tsdn))
 		return;
+	tsd = tsdn_tsd(tsdn);
 
 	witnesses = tsd_witnessesp_get(tsd);
 	w = ql_last(witnesses, link);
@@ -202,7 +212,7 @@ void
 witnesses_cleanup(tsd_t *tsd)
 {
 
-	witness_assert_lockless(tsd);
+	witness_assert_lockless(tsd_tsdn(tsd));
 
 	/* Do nothing. */
 }
diff --git a/src/zone.c b/src/zone.c
index 8f25051a..2c17123a 100644
--- a/src/zone.c
+++ b/src/zone.c
@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
 	 * not work in practice, we must check all pointers to assure that they
 	 * reside within a mapped chunk before determining size.
 	 */
-	return (ivsalloc(tsd_fetch(), ptr, config_prof));
+	return (ivsalloc(tsdn_fetch(), ptr, config_prof));
 }
 
 static void *
@@ -87,7 +87,7 @@ static void
 zone_free(malloc_zone_t *zone, void *ptr)
 {
 
-	if (ivsalloc(tsd_fetch(), ptr, config_prof) != 0) {
+	if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
 		je_free(ptr);
 		return;
 	}
@@ -99,7 +99,7 @@ static void *
 zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
 {
 
-	if (ivsalloc(tsd_fetch(), ptr, config_prof) != 0)
+	if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
 		return (je_realloc(ptr, size));
 
 	return (realloc(ptr, size));
@@ -123,7 +123,7 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
 {
 	size_t alloc_size;
 
-	alloc_size = ivsalloc(tsd_fetch(), ptr, config_prof);
+	alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
 	if (alloc_size != 0) {
 		assert(alloc_size == size);
 		je_free(ptr);
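The zone hooks above decide ownership by size-probing the pointer: a nonzero ivsalloc() result means the allocation lives in a jemalloc-managed chunk, so only then may je_free() touch it. Reduced to its skeleton — the two-argument ivsalloc() declared here is a simplified stand-in for the real lookup:

#include <stddef.h>
#include <stdlib.h>

typedef struct tsdn_s tsdn_t;

extern tsdn_t *tsdn_fetch(void);	/* may be NULL before bootstrap */
extern size_t ivsalloc(tsdn_t *tsdn, const void *ptr);	/* 0 if not ours */
extern void je_free(void *ptr);

/* System-zone free hook: route only known allocations to jemalloc. */
static void
zone_free_hook(void *ptr)
{
	if (ivsalloc(tsdn_fetch(), ptr) != 0) {
		je_free(ptr);
		return;
	}
	free(ptr);	/* fall through to the original allocator */
}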
diff --git a/test/unit/arena_reset.c b/test/unit/arena_reset.c
index 8e769de6..8ba36c21 100644
--- a/test/unit/arena_reset.c
+++ b/test/unit/arena_reset.c
@@ -86,7 +86,7 @@ TEST_BEGIN(test_arena_reset)
 	void **ptrs;
 	int flags;
 	size_t mib[3];
-	tsd_t *tsd;
+	tsdn_t *tsdn;
 
 	test_skip_if((config_valgrind && unlikely(in_valgrind)) ||
 	    (config_fill && unlikely(opt_quarantine)));
@@ -124,11 +124,11 @@ TEST_BEGIN(test_arena_reset)
 		    "Unexpected mallocx(%zu, %#x) failure", sz, flags);
 	}
 
-	tsd = tsd_fetch();
+	tsdn = tsdn_fetch();
 
 	/* Verify allocations. */
 	for (i = 0; i < nptrs; i++) {
-		assert_zu_gt(ivsalloc(tsd, ptrs[i], false), 0,
+		assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0,
 		    "Allocation should have queryable size");
 	}
 
@@ -142,7 +142,7 @@ TEST_BEGIN(test_arena_reset)
 	/* Verify allocations no longer exist. */
 	for (i = 0; i < nptrs; i++) {
-		assert_zu_eq(ivsalloc(tsd, ptrs[i], false), 0,
+		assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0,
 		    "Allocation should no longer exist");
 	}
 
diff --git a/test/unit/ckh.c b/test/unit/ckh.c
index b1175959..961e2acb 100644
--- a/test/unit/ckh.c
+++ b/test/unit/ckh.c
@@ -2,24 +2,24 @@
 
 TEST_BEGIN(test_new_delete)
 {
-	tsd_t *tsd;
+	tsdn_t *tsdn;
 	ckh_t ckh;
 
-	tsd = tsd_fetch();
+	tsdn = tsdn_fetch();
 
-	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
-	    "Unexpected ckh_new() error");
-	ckh_delete(tsd, &ckh);
+	assert_false(ckh_new(tsdn, &ckh, 2, ckh_string_hash,
+	    ckh_string_keycomp), "Unexpected ckh_new() error");
+	ckh_delete(tsdn, &ckh);
 
-	assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash,
+	assert_false(ckh_new(tsdn, &ckh, 3, ckh_pointer_hash,
 	    ckh_pointer_keycomp), "Unexpected ckh_new() error");
-	ckh_delete(tsd, &ckh);
+	ckh_delete(tsdn, &ckh);
 }
 TEST_END
 
 TEST_BEGIN(test_count_insert_search_remove)
 {
-	tsd_t *tsd;
+	tsdn_t *tsdn;
 	ckh_t ckh;
 	const char *strs[] = {
 	    "a string",
@@ -30,17 +30,17 @@ TEST_BEGIN(test_count_insert_search_remove)
 	const char *missing = "A string not in the hash table.";
 	size_t i;
 
-	tsd = tsd_fetch();
+	tsdn = tsdn_fetch();
 
-	assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
-	    "Unexpected ckh_new() error");
+	assert_false(ckh_new(tsdn, &ckh, 2, ckh_string_hash,
+	    ckh_string_keycomp), "Unexpected ckh_new() error");
 	assert_zu_eq(ckh_count(&ckh), 0,
 	    "ckh_count() should return %zu, but it returned %zu", ZU(0),
 	    ckh_count(&ckh));
 
 	/* Insert. */
 	for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) {
-		ckh_insert(tsd, &ckh, strs[i], strs[i]);
+		ckh_insert(tsdn, &ckh, strs[i], strs[i]);
 		assert_zu_eq(ckh_count(&ckh), i+1,
 		    "ckh_count() should return %zu, but it returned %zu",
 		    i+1, ckh_count(&ckh));
@@ -85,7 +85,7 @@ TEST_BEGIN(test_count_insert_search_remove)
 		vp = (i & 2) ? &v.p : NULL;
 		k.p = NULL;
 		v.p = NULL;
-		assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp),
+		assert_false(ckh_remove(tsdn, &ckh, strs[i], kp, vp),
 		    "Unexpected ckh_remove() error");
 
strs[i] : (const char *)NULL; @@ -101,22 +101,22 @@ TEST_BEGIN(test_count_insert_search_remove) ckh_count(&ckh)); } - ckh_delete(tsd, &ckh); + ckh_delete(tsdn, &ckh); } TEST_END TEST_BEGIN(test_insert_iter_remove) { #define NITEMS ZU(1000) - tsd_t *tsd; + tsdn_t *tsdn; ckh_t ckh; void **p[NITEMS]; void *q, *r; size_t i; - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, + assert_false(ckh_new(tsdn, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); for (i = 0; i < NITEMS; i++) { @@ -128,7 +128,7 @@ TEST_BEGIN(test_insert_iter_remove) size_t j; for (j = i; j < NITEMS; j++) { - assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), + assert_false(ckh_insert(tsdn, &ckh, p[j], p[j]), "Unexpected ckh_insert() failure"); assert_false(ckh_search(&ckh, p[j], &q, &r), "Unexpected ckh_search() failure"); @@ -143,13 +143,13 @@ TEST_BEGIN(test_insert_iter_remove) for (j = i + 1; j < NITEMS; j++) { assert_false(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() failure"); - assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), + assert_false(ckh_remove(tsdn, &ckh, p[j], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() success"); - assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), + assert_true(ckh_remove(tsdn, &ckh, p[j], &q, &r), "Unexpected ckh_remove() success"); } @@ -184,13 +184,13 @@ TEST_BEGIN(test_insert_iter_remove) for (i = 0; i < NITEMS; i++) { assert_false(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() failure"); - assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), + assert_false(ckh_remove(tsdn, &ckh, p[i], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[i], q, "Key pointer mismatch"); assert_ptr_eq(p[i], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() success"); - assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), + assert_true(ckh_remove(tsdn, &ckh, p[i], &q, &r), "Unexpected ckh_remove() success"); dallocx(p[i], 0); } @@ -198,7 +198,7 @@ TEST_BEGIN(test_insert_iter_remove) assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); - ckh_delete(tsd, &ckh); + ckh_delete(tsdn, &ckh); #undef NITEMS } TEST_END diff --git a/test/unit/junk.c b/test/unit/junk.c index 414874a0..acddc601 100644 --- a/test/unit/junk.c +++ b/test/unit/junk.c @@ -53,10 +53,10 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize) } static void -huge_dalloc_junk_intercept(tsd_t *tsd, void *ptr, size_t usize) +huge_dalloc_junk_intercept(tsdn_t *tsdn, void *ptr, size_t usize) { - huge_dalloc_junk_orig(tsd, ptr, usize); + huge_dalloc_junk_orig(tsdn, ptr, usize); /* * The conditions under which junk filling actually occurs are nuanced * enough that it doesn't make sense to duplicate the decision logic in diff --git a/test/unit/prof_reset.c b/test/unit/prof_reset.c index 83f51df8..5ae45fd2 100644 --- a/test/unit/prof_reset.c +++ b/test/unit/prof_reset.c @@ -94,7 +94,7 @@ TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool -prof_dump_header_intercept(tsd_t *tsd, bool propagate_err, +prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { diff --git a/test/unit/witness.c b/test/unit/witness.c index 430d8203..ed172753 100644 --- a/test/unit/witness.c +++ 
b/test/unit/witness.c @@ -60,76 +60,76 @@ witness_comp_reverse(const witness_t *a, const witness_t *b) TEST_BEGIN(test_witness) { witness_t a, b; - tsd_t *tsd; + tsdn_t *tsdn; test_skip_if(!config_debug); - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); - witness_assert_not_owner(tsd, &a); - witness_lock(tsd, &a); - witness_assert_owner(tsd, &a); + witness_assert_not_owner(tsdn, &a); + witness_lock(tsdn, &a); + witness_assert_owner(tsdn, &a); witness_init(&b, "b", 2, NULL); - witness_assert_not_owner(tsd, &b); - witness_lock(tsd, &b); - witness_assert_owner(tsd, &b); + witness_assert_not_owner(tsdn, &b); + witness_lock(tsdn, &b); + witness_assert_owner(tsdn, &b); - witness_unlock(tsd, &a); - witness_unlock(tsd, &b); + witness_unlock(tsdn, &a); + witness_unlock(tsdn, &b); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); } TEST_END TEST_BEGIN(test_witness_comp) { witness_t a, b, c, d; - tsd_t *tsd; + tsdn_t *tsdn; test_skip_if(!config_debug); - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_init(&a, "a", 1, witness_comp); - witness_assert_not_owner(tsd, &a); - witness_lock(tsd, &a); - witness_assert_owner(tsd, &a); + witness_assert_not_owner(tsdn, &a); + witness_lock(tsdn, &a); + witness_assert_owner(tsdn, &a); witness_init(&b, "b", 1, witness_comp); - witness_assert_not_owner(tsd, &b); - witness_lock(tsd, &b); - witness_assert_owner(tsd, &b); - witness_unlock(tsd, &b); + witness_assert_not_owner(tsdn, &b); + witness_lock(tsdn, &b); + witness_assert_owner(tsdn, &b); + witness_unlock(tsdn, &b); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_init(&c, "c", 1, witness_comp_reverse); - witness_assert_not_owner(tsd, &c); + witness_assert_not_owner(tsdn, &c); assert_false(saw_lock_error, "Unexpected witness lock error"); - witness_lock(tsd, &c); + witness_lock(tsdn, &c); assert_true(saw_lock_error, "Expected witness lock error"); - witness_unlock(tsd, &c); + witness_unlock(tsdn, &c); saw_lock_error = false; witness_init(&d, "d", 1, NULL); - witness_assert_not_owner(tsd, &d); + witness_assert_not_owner(tsdn, &d); assert_false(saw_lock_error, "Unexpected witness lock error"); - witness_lock(tsd, &d); + witness_lock(tsdn, &d); assert_true(saw_lock_error, "Expected witness lock error"); - witness_unlock(tsd, &d); + witness_unlock(tsdn, &d); - witness_unlock(tsd, &a); + witness_unlock(tsdn, &a); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_lock_error = witness_lock_error_orig; } @@ -138,7 +138,7 @@ TEST_END TEST_BEGIN(test_witness_reversal) { witness_t a, b; - tsd_t *tsd; + tsdn_t *tsdn; test_skip_if(!config_debug); @@ -146,22 +146,22 @@ TEST_BEGIN(test_witness_reversal) witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); witness_init(&b, "b", 2, NULL); - witness_lock(tsd, &b); + witness_lock(tsdn, &b); assert_false(saw_lock_error, "Unexpected witness lock error"); - witness_lock(tsd, &a); + witness_lock(tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); - witness_unlock(tsd, &a); - witness_unlock(tsd, &b); + witness_unlock(tsdn, &a); + witness_unlock(tsdn, &b); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); 
witness_lock_error = witness_lock_error_orig; } @@ -170,7 +170,7 @@ TEST_END TEST_BEGIN(test_witness_recursive) { witness_t a; - tsd_t *tsd; + tsdn_t *tsdn; test_skip_if(!config_debug); @@ -182,22 +182,22 @@ TEST_BEGIN(test_witness_recursive) witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); - witness_lock(tsd, &a); + witness_lock(tsdn, &a); assert_false(saw_lock_error, "Unexpected witness lock error"); assert_false(saw_not_owner_error, "Unexpected witness not owner error"); - witness_lock(tsd, &a); + witness_lock(tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); assert_true(saw_not_owner_error, "Expected witness not owner error"); - witness_unlock(tsd, &a); + witness_unlock(tsdn, &a); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_owner_error = witness_owner_error_orig; witness_lock_error = witness_lock_error_orig; @@ -208,7 +208,7 @@ TEST_END TEST_BEGIN(test_witness_unlock_not_owned) { witness_t a; - tsd_t *tsd; + tsdn_t *tsdn; test_skip_if(!config_debug); @@ -216,17 +216,17 @@ TEST_BEGIN(test_witness_unlock_not_owned) witness_owner_error = witness_owner_error_intercept; saw_owner_error = false; - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); assert_false(saw_owner_error, "Unexpected owner error"); - witness_unlock(tsd, &a); + witness_unlock(tsdn, &a); assert_true(saw_owner_error, "Expected owner error"); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_owner_error = witness_owner_error_orig; } @@ -235,7 +235,7 @@ TEST_END TEST_BEGIN(test_witness_lockful) { witness_t a; - tsd_t *tsd; + tsdn_t *tsdn; test_skip_if(!config_debug); @@ -243,22 +243,22 @@ TEST_BEGIN(test_witness_lockful) witness_lockless_error = witness_lockless_error_intercept; saw_lockless_error = false; - tsd = tsd_fetch(); + tsdn = tsdn_fetch(); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); assert_false(saw_lockless_error, "Unexpected lockless error"); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); - witness_lock(tsd, &a); - witness_assert_lockless(tsd); + witness_lock(tsdn, &a); + witness_assert_lockless(tsdn); assert_true(saw_lockless_error, "Expected lockless error"); - witness_unlock(tsd, &a); + witness_unlock(tsdn, &a); - witness_assert_lockless(tsd); + witness_assert_lockless(tsdn); witness_lockless_error = witness_lockless_error_orig; }
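
For readers tracing the conversion, the hunks above all apply one pattern: fetch a
nullable tsdn_t via tsdn_fetch(), short-circuit while tsd is still unbootstrapped,
and convert to a non-nullable tsd_t via tsdn_tsd() only on paths where NULL is
impossible. The standalone sketch below illustrates that shape; the tsdn_* names
mirror the internal API, but the definitions are simplified stand-ins for
illustration, not jemalloc's actual implementation.

/*
 * Illustrative sketch only -- simplified stand-ins for the internal
 * tsd/tsdn API, not jemalloc's actual implementation.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { int dummy; } tsd_t;	/* thread-specific data */
typedef struct tsdn_s tsdn_t;		/* nullable view of tsd_t */

static bool tsd_booted = false;		/* tsd_booted_get() stand-in */
static tsd_t tsd_instance;

static bool
tsdn_null(const tsdn_t *tsdn)
{

	return (tsdn == NULL);
}

static tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{

	assert(!tsdn_null(tsdn));	/* Dangerous conversion asserts. */
	return ((tsd_t *)tsdn);
}

/* Unlike tsd_fetch(), safe to call before tsd bootstrapping completes. */
static tsdn_t *
tsdn_fetch(void)
{

	if (!tsd_booted)
		return (NULL);
	return ((tsdn_t *)&tsd_instance);
}

/* A consumer that does not critically rely on tsd, e.g. an assertion. */
static void
witness_assert_lockless_sketch(tsdn_t *tsdn)
{
	tsd_t *tsd;

	if (tsdn_null(tsdn))
		return;			/* Pre-boot: nothing to check. */
	tsd = tsdn_tsd(tsdn);
	(void)tsd;			/* ...inspect tsd-resident state... */
}

int
main(void)
{

	witness_assert_lockless_sketch(tsdn_fetch());	/* Safe pre-boot. */
	tsd_booted = true;
	witness_assert_lockless_sketch(tsdn_fetch());	/* Normal path. */
	printf("OK\n");
	return (0);
}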