parent 8413463f3a
commit b2c0d6322d
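Overview (inferred from the hunks below): this commit makes two intertwined changes. First, a tsd_t *tsd parameter is threaded through the internal arena, base, chunk, ctl, huge, mutex, profiling, and tcache APIs so that thread-specific data is available wherever a lock may be taken. Second, a witness lock-order validator is wired into malloc_mutex_t: each mutex gains a name, a witness rank, and an embedded witness_t; lock/unlock maintain a per-thread list of held witnesses stored in TSD; and debug builds can assert mutex ownership and non-ownership at the call site.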
@@ -103,7 +103,8 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/tcache.c \
 	$(srcroot)src/ticker.c \
 	$(srcroot)src/tsd.c \
-	$(srcroot)src/util.c
+	$(srcroot)src/util.c \
+	$(srcroot)src/witness.c
 ifeq ($(enable_valgrind), 1)
 C_SRCS += $(srcroot)src/valgrind.c
 endif
@@ -169,6 +170,7 @@ TESTS_UNIT := $(srcroot)test/unit/atomic.c \
 	$(srcroot)test/unit/nstime.c \
 	$(srcroot)test/unit/tsd.c \
 	$(srcroot)test/unit/util.c \
+	$(srcroot)test/unit/witness.c \
 	$(srcroot)test/unit/zero.c
 TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
 	$(srcroot)test/integration/allocated.c \
@@ -506,23 +506,25 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
     bool cache);
 void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
     bool cache);
-extent_node_t *arena_node_alloc(arena_t *arena);
-void arena_node_dalloc(arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
-    bool *zero);
-void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
-void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
+extent_node_t *arena_node_alloc(tsd_t *tsd, arena_t *arena);
+void arena_node_dalloc(tsd_t *tsd, arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool *zero);
+void arena_chunk_dalloc_huge(tsd_t *tsd, arena_t *arena, void *chunk,
+    size_t usize);
+void arena_chunk_ralloc_huge_similar(tsd_t *tsd, arena_t *arena, void *chunk,
     size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
+void arena_chunk_ralloc_huge_shrink(tsd_t *tsd, arena_t *arena, void *chunk,
     size_t oldsize, size_t usize);
-bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
+bool arena_chunk_ralloc_huge_expand(tsd_t *tsd, arena_t *arena, void *chunk,
     size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(arena_t *arena);
-bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(arena_t *arena);
-bool arena_decay_time_set(arena_t *arena, ssize_t decay_time);
-void arena_maybe_purge(arena_t *arena);
-void arena_purge(arena_t *arena, bool all);
+ssize_t arena_lg_dirty_mult_get(tsd_t *tsd, arena_t *arena);
+bool arena_lg_dirty_mult_set(tsd_t *tsd, arena_t *arena,
+    ssize_t lg_dirty_mult);
+ssize_t arena_decay_time_get(tsd_t *tsd, arena_t *arena);
+bool arena_decay_time_set(tsd_t *tsd, arena_t *arena, ssize_t decay_time);
+void arena_purge(tsd_t *tsd, arena_t *arena, bool all);
+void arena_maybe_purge(tsd_t *tsd, arena_t *arena);
 void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
     szind_t binind, uint64_t prof_accumbytes);
 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
@@ -542,11 +544,11 @@ void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache);
 void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
     size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_prof_promoted(tsd_t *tsd, const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(tsd_t *tsd, arena_t *arena,
+    arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+    void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
 void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
     void *ptr, size_t pageind);
 #ifdef JEMALLOC_JET
@@ -555,8 +557,8 @@ extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
 #else
 void arena_dalloc_junk_large(void *ptr, size_t usize);
 #endif
-void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr);
+void arena_dalloc_large_junked_locked(tsd_t *tsd, arena_t *arena,
+    arena_chunk_t *chunk, void *ptr);
 void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 #ifdef JEMALLOC_JET
@@ -567,27 +569,28 @@ bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t arena_dss_prec_get(tsd_t *tsd, arena_t *arena);
+bool arena_dss_prec_set(tsd_t *tsd, arena_t *arena, dss_prec_t dss_prec);
 ssize_t arena_lg_dirty_mult_default_get(void);
 bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
 ssize_t arena_decay_time_default_get(void);
 bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
+void arena_basic_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
     size_t *nactive, size_t *ndirty);
-void arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
-    ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
+void arena_stats_merge(tsd_t *tsd, arena_t *arena, unsigned *nthreads,
+    const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+    size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+    malloc_huge_stats_t *hstats);
 unsigned arena_nthreads_get(arena_t *arena);
 void arena_nthreads_inc(arena_t *arena);
 void arena_nthreads_dec(arena_t *arena);
-arena_t *arena_new(unsigned ind);
+arena_t *arena_new(tsd_t *tsd, unsigned ind);
 bool arena_boot(void);
-void arena_prefork(arena_t *arena);
-void arena_postfork_parent(arena_t *arena);
-void arena_postfork_child(arena_t *arena);
+void arena_prefork(tsd_t *tsd, arena_t *arena);
+void arena_postfork_parent(tsd_t *tsd, arena_t *arena);
+void arena_postfork_child(tsd_t *tsd, arena_t *arena);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -644,21 +647,22 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t arena_metadata_allocated_get(arena_t *arena);
 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(tsd_t *tsd, arena_t *arena, uint64_t accumbytes);
 szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(const void *ptr);
-void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void arena_prof_tctx_reset(const void *ptr, size_t usize,
+prof_tctx_t *arena_prof_tctx_get(tsd_t *tsd, const void *ptr);
+void arena_prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize,
+    prof_tctx_t *tctx);
+void arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize,
     const void *old_ptr, prof_tctx_t *old_tctx);
 void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
 void arena_decay_tick(tsd_t *tsd, arena_t *arena);
 void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
     bool zero, tcache_t *tcache, bool slow_path);
 arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(const void *ptr, bool demote);
+size_t arena_salloc(tsd_t *tsd, const void *ptr, bool demote);
 void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
 void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 #endif
@@ -1035,7 +1039,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(tsd_t *tsd, arena_t *arena, uint64_t accumbytes)
 {
 
 	cassert(config_prof);
@@ -1046,9 +1050,9 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 	{
 		bool ret;
 
-		malloc_mutex_lock(&arena->lock);
+		malloc_mutex_lock(tsd, &arena->lock);
 		ret = arena_prof_accum_impl(arena, accumbytes);
-		malloc_mutex_unlock(&arena->lock);
+		malloc_mutex_unlock(tsd, &arena->lock);
 		return (ret);
 	}
 }
@@ -1184,7 +1188,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(const void *ptr)
+arena_prof_tctx_get(tsd_t *tsd, const void *ptr)
 {
 	prof_tctx_t *ret;
 	arena_chunk_t *chunk;
@@ -1205,13 +1209,14 @@ arena_prof_tctx_get(const void *ptr)
 			ret = atomic_read_p(&elm->prof_tctx_pun);
 		}
 	} else
-		ret = huge_prof_tctx_get(ptr);
+		ret = huge_prof_tctx_get(tsd, ptr);
 
 	return (ret);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+arena_prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize,
+    prof_tctx_t *tctx)
 {
 	arena_chunk_t *chunk;
 
@@ -1242,12 +1247,12 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
 			assert(arena_mapbits_large_get(chunk, pageind) == 0);
 		}
 	} else
-		huge_prof_tctx_set(ptr, tctx);
+		huge_prof_tctx_set(tsd, ptr, tctx);
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
-    prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize,
+    const void *old_ptr, prof_tctx_t *old_tctx)
 {
 
 	cassert(config_prof);
@@ -1270,7 +1275,7 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
 			atomic_write_p(&elm->prof_tctx_pun,
 			    (prof_tctx_t *)(uintptr_t)1U);
 		} else
-			huge_prof_tctx_reset(ptr);
+			huge_prof_tctx_reset(tsd, ptr);
 	}
 }
 
@@ -1285,7 +1290,7 @@ arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
 	if (unlikely(decay_ticker == NULL))
 		return;
 	if (unlikely(ticker_ticks(decay_ticker, nticks)))
-		arena_purge(arena, false);
+		arena_purge(tsd, arena, false);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -1332,7 +1337,7 @@ arena_aalloc(const void *ptr)
 
 /* Return the size of the allocation pointed to by ptr. */
 JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(const void *ptr, bool demote)
+arena_salloc(tsd_t *tsd, const void *ptr, bool demote)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
@@ -1375,7 +1380,7 @@ arena_salloc(const void *ptr, bool demote)
 			ret = index2size(binind);
 		}
 	} else
-		ret = huge_salloc(ptr);
+		ret = huge_salloc(tsd, ptr);
 
 	return (ret);
 }
@@ -1445,7 +1450,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 			    pageind) - large_pad;
 		}
 	}
	assert(s2u(size) == s2u(arena_salloc(ptr, false)));
-	assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+	assert(s2u(size) == s2u(arena_salloc(tsd, ptr, false)));
 
 	if (likely(size <= SMALL_MAXCLASS)) {
 		/* Small allocation. */
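The pattern in the arena hunks above repeats throughout the commit: any function that may acquire arena->lock now takes the caller's tsd and hands it down to malloc_mutex_lock(). A minimal sketch of what this means for an internal caller, assuming nothing beyond the signatures shown above (example_usable_size is hypothetical, not part of the patch):

	/* Hypothetical caller, for illustration only. */
	static size_t
	example_usable_size(tsd_t *tsd, const void *ptr)
	{

		/* tsd flows through so internal locking can be witness-checked. */
		return (arena_salloc(tsd, ptr, false));
	}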
@@ -9,12 +9,13 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-void *base_alloc(size_t size);
-void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
+void *base_alloc(tsd_t *tsd, size_t size);
+void base_stats_get(tsd_t *tsd, size_t *allocated, size_t *resident,
+    size_t *mapped);
 bool base_boot(void);
-void base_prefork(void);
-void base_postfork_parent(void);
-void base_postfork_child(void);
+void base_prefork(tsd_t *tsd);
+void base_postfork_parent(tsd_t *tsd);
+void base_postfork_child(tsd_t *tsd);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -48,28 +48,32 @@ extern size_t chunk_npages;
 
 extern const chunk_hooks_t chunk_hooks_default;
 
-chunk_hooks_t chunk_hooks_get(arena_t *arena);
-chunk_hooks_t chunk_hooks_set(arena_t *arena,
+chunk_hooks_t chunk_hooks_get(tsd_t *tsd, arena_t *arena);
+chunk_hooks_t chunk_hooks_set(tsd_t *tsd, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
 
-bool chunk_register(const void *chunk, const extent_node_t *node);
+bool chunk_register(tsd_t *tsd, const void *chunk,
+    const extent_node_t *node);
 void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero,
-    bool dalloc_node);
-void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
-void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool committed);
-void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool zeroed, bool committed);
-bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, size_t offset, size_t length);
+void *chunk_alloc_cache(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool dalloc_node);
+void *chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit);
+void chunk_dalloc_cache(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
+void chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+    bool committed);
+bool chunk_purge_wrapper(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
+    size_t length);
 bool chunk_boot(void);
-void chunk_prefork(void);
-void chunk_postfork_parent(void);
-void chunk_postfork_child(void);
+void chunk_prefork(tsd_t *tsd);
+void chunk_postfork_parent(tsd_t *tsd);
+void chunk_postfork_child(tsd_t *tsd);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -21,15 +21,15 @@ extern const char *dss_prec_names[];
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-dss_prec_t chunk_dss_prec_get(void);
-bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
-    size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(void *chunk);
+dss_prec_t chunk_dss_prec_get(tsd_t *tsd);
+bool chunk_dss_prec_set(tsd_t *tsd, dss_prec_t dss_prec);
+void *chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit);
+bool chunk_in_dss(tsd_t *tsd, void *chunk);
 bool chunk_dss_boot(void);
-void chunk_dss_prefork(void);
-void chunk_dss_postfork_parent(void);
-void chunk_dss_postfork_child(void);
+void chunk_dss_prefork(tsd_t *tsd);
+void chunk_dss_postfork_parent(tsd_t *tsd);
+void chunk_dss_postfork_child(tsd_t *tsd);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -21,13 +21,14 @@ struct ctl_named_node_s {
 	/* If (nchildren == 0), this is a terminal node. */
 	unsigned nchildren;
 	const ctl_node_t *children;
-	int (*ctl)(const size_t *, size_t, void *, size_t *,
-	    void *, size_t);
+	int (*ctl)(tsd_t *, const size_t *, size_t, void *,
+	    size_t *, void *, size_t);
 };
 
 struct ctl_indexed_node_s {
 	struct ctl_node_s node;
-	const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
+	const ctl_named_node_t *(*index)(tsd_t *, const size_t *, size_t,
+	    size_t);
 };
 
 struct ctl_arena_stats_s {
@@ -68,16 +69,17 @@ struct ctl_stats_s {
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
-    size_t newlen);
-int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-
-int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
+int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp,
+    size_t *miblenp);
+
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen);
 bool ctl_boot(void);
-void ctl_prefork(void);
-void ctl_postfork_parent(void);
-void ctl_postfork_child(void);
+void ctl_prefork(tsd_t *tsd);
+void ctl_postfork_parent(tsd_t *tsd);
+void ctl_postfork_child(tsd_t *tsd);
 
 #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
 	if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
@@ -18,15 +18,15 @@ bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
     size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(void *, size_t);
+typedef void (huge_dalloc_junk_t)(tsd_t *, void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
 void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
 arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(const void *ptr);
-void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(const void *ptr);
+size_t huge_salloc(tsd_t *tsd, const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(tsd_t *tsd, const void *ptr);
+void huge_prof_tctx_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsd_t *tsd, const void *ptr);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -368,6 +368,7 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/mb.h"
@@ -399,6 +400,7 @@ typedef unsigned szind_t;
 #include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/bitmap.h"
@@ -465,7 +467,7 @@ void *bootstrap_malloc(size_t size);
 void *bootstrap_calloc(size_t num, size_t size);
 void bootstrap_free(void *ptr);
 unsigned narenas_total_get(void);
-arena_t *arena_init(unsigned ind);
+arena_t *arena_init(tsd_t *tsd, unsigned ind);
 arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
 arena_t *arena_choose_hard(tsd_t *tsd);
 void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
@@ -490,6 +492,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/mb.h"
 #include "jemalloc/internal/bitmap.h"
@@ -521,6 +524,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/smoothstep.h"
 #include "jemalloc/internal/stats.h"
 #include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/tsd.h"
 #include "jemalloc/internal/mb.h"
@@ -545,7 +549,7 @@ size_t sa2u(size_t size, size_t alignment);
 arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
 arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
     bool refresh_if_missing);
-arena_t *arena_get(unsigned ind, bool init_if_missing);
+arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing);
 ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
 #endif
 
@@ -819,7 +823,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
 }
 
 JEMALLOC_INLINE arena_t *
-arena_get(unsigned ind, bool init_if_missing)
+arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing)
 {
 	arena_t *ret;
 
@@ -829,7 +833,7 @@ arena_get(unsigned ind, bool init_if_missing)
 	if (unlikely(ret == NULL)) {
 		ret = atomic_read_p((void *)&arenas[ind]);
 		if (init_if_missing && unlikely(ret == NULL))
-			ret = arena_init(ind);
+			ret = arena_init(tsd, ind);
 	}
 	return (ret);
 }
@@ -863,7 +867,7 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
 
 #ifndef JEMALLOC_ENABLE_INLINE
 arena_t *iaalloc(const void *ptr);
-size_t isalloc(const void *ptr, bool demote);
+size_t isalloc(tsd_t *tsd, const void *ptr, bool demote);
 void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
     tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
 void *imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
@@ -877,9 +881,9 @@ void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
 void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena);
 void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(const void *ptr, bool demote);
+size_t ivsalloc(tsd_t *tsd, const void *ptr, bool demote);
 size_t u2rz(size_t usize);
-size_t p2rz(const void *ptr);
+size_t p2rz(tsd_t *tsd, const void *ptr);
 void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
     bool slow_path);
 void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
@@ -914,14 +918,14 @@ iaalloc(const void *ptr)
 * size_t sz = isalloc(ptr, config_prof);
 */
 JEMALLOC_ALWAYS_INLINE size_t
-isalloc(const void *ptr, bool demote)
+isalloc(tsd_t *tsd, const void *ptr, bool demote)
 {
 
 	assert(ptr != NULL);
 	/* Demotion only makes sense if config_prof is true. */
 	assert(config_prof || !demote);
 
-	return (arena_salloc(ptr, demote));
+	return (arena_salloc(tsd, ptr, demote));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -934,7 +938,7 @@ iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
 
 	ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
-		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsd, ret,
 		    config_prof));
 	}
 	return (ret);
@@ -982,7 +986,7 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
 	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
-		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+		arena_metadata_allocated_add(iaalloc(ret), isalloc(tsd, ret,
 		    config_prof));
 	}
 	return (ret);
@@ -1005,7 +1009,7 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(const void *ptr, bool demote)
+ivsalloc(tsd_t *tsd, const void *ptr, bool demote)
 {
 	extent_node_t *node;
 
@@ -1017,7 +1021,7 @@ ivsalloc(const void *ptr, bool demote)
 	assert(extent_node_addr_get(node) == ptr ||
 	    extent_node_achunk_get(node));
 
-	return (isalloc(ptr, demote));
+	return (isalloc(tsd, ptr, demote));
 }
 
 JEMALLOC_INLINE size_t
@@ -1035,9 +1039,9 @@ u2rz(size_t usize)
 }
 
 JEMALLOC_INLINE size_t
-p2rz(const void *ptr)
+p2rz(tsd_t *tsd, const void *ptr)
 {
-	size_t usize = isalloc(ptr, false);
+	size_t usize = isalloc(tsd, ptr, false);
 
 	return (u2rz(usize));
 }
@@ -1049,7 +1053,7 @@ idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
 
 	assert(ptr != NULL);
 	if (config_stats && is_metadata) {
-		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsd, ptr,
 		    config_prof));
 	}
 
@@ -104,9 +104,9 @@ mb_write(void)
 {
 	malloc_mutex_t mtx;
 
-	malloc_mutex_init(&mtx);
-	malloc_mutex_lock(&mtx);
-	malloc_mutex_unlock(&mtx);
+	malloc_mutex_init(&mtx, MALLOC_MUTEX_RANK_OMIT);
+	malloc_mutex_lock(NULL, &mtx);
+	malloc_mutex_unlock(NULL, &mtx);
 }
 #endif
 #endif
@@ -6,17 +6,21 @@ typedef struct malloc_mutex_s malloc_mutex_t;
 #ifdef _WIN32
 # define MALLOC_MUTEX_INITIALIZER
 #elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0}
+# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
+# define MALLOC_MUTEX_INITIALIZER \
+    {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
 #else
 # if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
     defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
 # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
+# define MALLOC_MUTEX_INITIALIZER \
+    {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
+    WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
 # else
 # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
+# define MALLOC_MUTEX_INITIALIZER \
+    {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
 # endif
 #endif
 
@@ -39,6 +43,7 @@ struct malloc_mutex_s {
 #else
 	pthread_mutex_t lock;
 #endif
+	witness_t witness;
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
@@ -52,27 +57,31 @@ extern bool isthreaded;
 # define isthreaded true
 #endif
 
-bool malloc_mutex_init(malloc_mutex_t *mutex);
-void malloc_mutex_prefork(malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
-bool mutex_boot(void);
+bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+    witness_rank_t rank);
+void malloc_mutex_prefork(tsd_t *tsd, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(tsd_t *tsd, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(tsd_t *tsd, malloc_mutex_t *mutex);
+bool malloc_mutex_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
-void malloc_mutex_lock(malloc_mutex_t *mutex);
-void malloc_mutex_unlock(malloc_mutex_t *mutex);
+void malloc_mutex_lock(tsd_t *tsd, malloc_mutex_t *mutex);
+void malloc_mutex_unlock(tsd_t *tsd, malloc_mutex_t *mutex);
+void malloc_mutex_assert_owner(tsd_t *tsd, malloc_mutex_t *mutex);
+void malloc_mutex_assert_not_owner(tsd_t *tsd, malloc_mutex_t *mutex);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
 JEMALLOC_INLINE void
-malloc_mutex_lock(malloc_mutex_t *mutex)
+malloc_mutex_lock(tsd_t *tsd, malloc_mutex_t *mutex)
 {
 
 	if (isthreaded) {
+		witness_assert_not_owner(tsd, &mutex->witness);
 #ifdef _WIN32
 # if _WIN32_WINNT >= 0x0600
 		AcquireSRWLockExclusive(&mutex->lock);
@@ -84,14 +93,19 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
 #else
 		pthread_mutex_lock(&mutex->lock);
 #endif
+		if (config_debug)
+			witness_lock(tsd, &mutex->witness);
 	}
 }
 
 JEMALLOC_INLINE void
-malloc_mutex_unlock(malloc_mutex_t *mutex)
+malloc_mutex_unlock(tsd_t *tsd, malloc_mutex_t *mutex)
 {
 
 	if (isthreaded) {
+		witness_assert_owner(tsd, &mutex->witness);
+		if (config_debug)
+			witness_unlock(tsd, &mutex->witness);
 #ifdef _WIN32
 # if _WIN32_WINNT >= 0x0600
 		ReleaseSRWLockExclusive(&mutex->lock);
@@ -105,6 +119,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
 #endif
 	}
 }
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_owner(tsd_t *tsd, malloc_mutex_t *mutex)
+{
+
+	if (config_debug)
+		witness_assert_owner(tsd, &mutex->witness);
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_not_owner(tsd_t *tsd, malloc_mutex_t *mutex)
+{
+
+	if (config_debug)
+		witness_assert_not_owner(tsd, &mutex->witness);
+}
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
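Taken together, the mutex hunks above define the new locking discipline: a mutex is initialized with a name and a witness rank, and in debug builds witness_lock()/witness_unlock() record it in the owner's per-thread witness list so that misordered acquisition can be caught at the lock site. A sketch under those assumptions (the mutex names and rank values are invented for illustration; WITNESS_RANK_OMIT, as used in the initializers above, suppresses order checking):

	/* Illustrative only; not part of the patch. */
	static malloc_mutex_t low_mtx;	/* Hypothetical rank 10. */
	static malloc_mutex_t high_mtx;	/* Hypothetical rank 20. */

	static bool
	example_boot(void)
	{

		if (malloc_mutex_init(&low_mtx, "example_low", 10))
			return (true);
		if (malloc_mutex_init(&high_mtx, "example_high", 20))
			return (true);
		return (false);
	}

	static void
	example_op(tsd_t *tsd)
	{

		malloc_mutex_lock(tsd, &low_mtx);
		malloc_mutex_lock(tsd, &high_mtx);	/* OK: rank increases. */
		/* Acquiring in the opposite order would assert in debug builds. */
		malloc_mutex_unlock(tsd, &high_mtx);
		malloc_mutex_unlock(tsd, &low_mtx);
	}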
@@ -314,6 +314,9 @@ lg_floor
 malloc_cprintf
 malloc_mutex_init
 malloc_mutex_lock
+malloc_mutex_assert_not_owner
+malloc_mutex_assert_owner
+malloc_mutex_boot
 malloc_mutex_postfork_child
 malloc_mutex_postfork_parent
 malloc_mutex_prefork
@@ -333,7 +336,6 @@ malloc_write
 map_bias
 map_misc_offset
 mb_write
-mutex_boot
 narenas_tdata_cleanup
 narenas_total_get
 ncpus
@@ -548,3 +550,14 @@ valgrind_freelike_block
 valgrind_make_mem_defined
 valgrind_make_mem_noaccess
 valgrind_make_mem_undefined
+witness_assert_lockless
+witness_assert_not_owner
+witness_assert_owner
+witness_init
+witness_lock
+witness_lock_error
+witness_lockless_error
+witness_not_owner_error
+witness_owner_error
+witness_unlock
+witnesses_cleanup
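These symbol-list edits track the API changes: mutex_boot is renamed to malloc_mutex_boot, and the new mutex assertion helpers plus the witness entry points are added so that, assuming the usual jemalloc build convention, the private-namespace symbol mangling keeps them out of the exported symbol space.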
@@ -281,7 +281,7 @@ extern uint64_t prof_interval;
 extern size_t lg_prof_sample;
 
 void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(const void *ptr, size_t usize,
+void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t usize,
     prof_tctx_t *tctx);
 void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
 void bt_init(prof_bt_t *bt, void **vec);
@@ -293,32 +293,32 @@ size_t prof_bt_count(void);
 const prof_cnt_t *prof_cnt_all(void);
 typedef int (prof_dump_open_t)(bool, const char *);
 extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
+typedef bool (prof_dump_header_t)(tsd_t *, bool, const prof_cnt_t *);
 extern prof_dump_header_t *prof_dump_header;
 #endif
-void prof_idump(void);
-bool prof_mdump(const char *filename);
-void prof_gdump(void);
+void prof_idump(tsd_t *tsd);
+bool prof_mdump(tsd_t *tsd, const char *filename);
+void prof_gdump(tsd_t *tsd);
 prof_tdata_t *prof_tdata_init(tsd_t *tsd);
 prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
 void prof_reset(tsd_t *tsd, size_t lg_sample);
 void prof_tdata_cleanup(tsd_t *tsd);
-const char *prof_thread_name_get(void);
-bool prof_active_get(void);
-bool prof_active_set(bool active);
+const char *prof_thread_name_get(tsd_t *tsd);
+bool prof_active_get(tsd_t *tsd);
+bool prof_active_set(tsd_t *tsd, bool active);
 int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(void);
-bool prof_thread_active_set(bool active);
-bool prof_thread_active_init_get(void);
-bool prof_thread_active_init_set(bool active_init);
-bool prof_gdump_get(void);
-bool prof_gdump_set(bool active);
+bool prof_thread_active_get(tsd_t *tsd);
+bool prof_thread_active_set(tsd_t *tsd, bool active);
+bool prof_thread_active_init_get(tsd_t *tsd);
+bool prof_thread_active_init_set(tsd_t *tsd, bool active_init);
+bool prof_gdump_get(tsd_t *tsd);
+bool prof_gdump_set(tsd_t *tsd, bool active);
 void prof_boot0(void);
 void prof_boot1(void);
-bool prof_boot2(void);
-void prof_prefork(void);
-void prof_postfork_parent(void);
-void prof_postfork_child(void);
+bool prof_boot2(tsd_t *tsd);
+void prof_prefork(tsd_t *tsd);
+void prof_postfork_parent(tsd_t *tsd);
+void prof_postfork_child(tsd_t *tsd);
 void prof_sample_threshold_update(prof_tdata_t *tdata);
 
 #endif /* JEMALLOC_H_EXTERNS */
@@ -329,17 +329,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
 bool prof_active_get_unlocked(void);
 bool prof_gdump_get_unlocked(void);
 prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
+prof_tctx_t *prof_tctx_get(tsd_t *tsd, const void *ptr);
+void prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize,
+    prof_tctx_t *tctx);
+void prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize,
+    const void *old_ptr, prof_tctx_t *tctx);
 bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
     prof_tdata_t **tdata_out);
 prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
     bool update);
-prof_tctx_t *prof_tctx_get(const void *ptr);
-void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+void prof_malloc(tsd_t *tsd, const void *ptr, size_t usize,
     prof_tctx_t *tctx);
-void prof_malloc_sample_object(const void *ptr, size_t usize,
-    prof_tctx_t *tctx);
-void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
 void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
     prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
     size_t old_usize, prof_tctx_t *old_tctx);
@@ -397,34 +397,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
 }
 
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(const void *ptr)
+prof_tctx_get(tsd_t *tsd, const void *ptr)
 {
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	return (arena_prof_tctx_get(ptr));
+	return (arena_prof_tctx_get(tsd, ptr));
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_tctx_set(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx)
 {
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	arena_prof_tctx_set(ptr, usize, tctx);
+	arena_prof_tctx_set(tsd, ptr, usize, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+prof_tctx_reset(tsd_t *tsd, const void *ptr, size_t usize, const void *old_ptr,
     prof_tctx_t *old_tctx)
 {
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+	arena_prof_tctx_reset(tsd, ptr, usize, old_ptr, old_tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -479,17 +479,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_malloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx)
 {
 
 	cassert(config_prof);
 	assert(ptr != NULL);
-	assert(usize == isalloc(ptr, true));
+	assert(usize == isalloc(tsd, ptr, true));
 
 	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
-		prof_malloc_sample_object(ptr, usize, tctx);
+		prof_malloc_sample_object(tsd, ptr, usize, tctx);
 	else
-		prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
+		prof_tctx_set(tsd, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -503,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 	assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
 
 	if (prof_active && !updated && ptr != NULL) {
-		assert(usize == isalloc(ptr, true));
+		assert(usize == isalloc(tsd, ptr, true));
 		if (prof_sample_accum_update(tsd, usize, true, NULL)) {
 			/*
 			 * Don't sample. The usize passed to prof_alloc_prep()
@@ -520,9 +520,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 	old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
 
 	if (unlikely(sampled))
-		prof_malloc_sample_object(ptr, usize, tctx);
+		prof_malloc_sample_object(tsd, ptr, usize, tctx);
 	else
-		prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+		prof_tctx_reset(tsd, ptr, usize, old_ptr, old_tctx);
 
 	if (unlikely(old_sampled))
 		prof_free_sampled_object(tsd, old_usize, old_tctx);
@@ -531,10 +531,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
 JEMALLOC_ALWAYS_INLINE void
 prof_free(tsd_t *tsd, const void *ptr, size_t usize)
 {
-	prof_tctx_t *tctx = prof_tctx_get(ptr);
+	prof_tctx_t *tctx = prof_tctx_get(tsd, ptr);
 
 	cassert(config_prof);
-	assert(usize == isalloc(ptr, true));
+	assert(usize == isalloc(tsd, ptr, true));
 
 	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
 		prof_free_sampled_object(tsd, usize, tctx);
@@ -130,7 +130,7 @@ extern size_t tcache_maxclass;
 */
 extern tcaches_t *tcaches;
 
-size_t tcache_salloc(const void *ptr);
+size_t tcache_salloc(tsd_t *tsd, const void *ptr);
 void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
 void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
@@ -138,19 +138,19 @@ void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
     szind_t binind, unsigned rem);
 void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache);
-void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
-void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
-    arena_t *newarena);
-void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
+void tcache_arena_associate(tsd_t *tsd, tcache_t *tcache, arena_t *arena);
+void tcache_arena_reassociate(tsd_t *tsd, tcache_t *tcache,
+    arena_t *oldarena, arena_t *newarena);
+void tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena);
 tcache_t *tcache_get_hard(tsd_t *tsd);
 tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
 void tcache_cleanup(tsd_t *tsd);
 void tcache_enabled_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
+void tcache_stats_merge(tsd_t *tsd, tcache_t *tcache, arena_t *arena);
 bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
 void tcaches_flush(tsd_t *tsd, unsigned ind);
 void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(void);
+bool tcache_boot(tsd_t *tsd);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -310,7 +310,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 	 */
 	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
 		usize = index2size(binind);
-		assert(tcache_salloc(ret) == usize);
+		assert(tcache_salloc(tsd, ret) == usize);
 	}
 
 	if (likely(!zero)) {
@@ -407,7 +407,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
 
-	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
+	assert(tcache_salloc(tsd, ptr) <= SMALL_MAXCLASS);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free))
 		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
@@ -434,8 +434,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
 	tcache_bin_info_t *tbin_info;
 
 	assert((size & PAGE_MASK) == 0);
-	assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
-	assert(tcache_salloc(ptr) <= tcache_maxclass);
+	assert(tcache_salloc(tsd, ptr) > SMALL_MAXCLASS);
+	assert(tcache_salloc(tsd, ptr) <= tcache_maxclass);
 
 	binind = size2index(size);
 
include/jemalloc/internal/tsd.h
@ -542,6 +542,7 @@ struct tsd_init_head_s {
     O(arenas_tdata_bypass, bool) \
     O(tcache_enabled, tcache_enabled_t) \
     O(quarantine, quarantine_t *) \
+    O(witnesses, witness_list_t) \
 
 #define TSD_INITIALIZER { \
     tsd_state_uninitialized, \
@ -554,7 +555,8 @@ struct tsd_init_head_s {
     0, \
     false, \
     tcache_enabled_default, \
-    NULL \
+    NULL, \
+    ql_head_initializer(witnesses) \
 }
 
 struct tsd_s {
@ -577,7 +579,7 @@ void *malloc_tsd_malloc(size_t size);
 void malloc_tsd_dalloc(void *wrapper);
 void malloc_tsd_no_cleanup(void *arg);
 void malloc_tsd_cleanup_register(bool (*f)(void));
-bool malloc_tsd_boot0(void);
+tsd_t *malloc_tsd_boot0(void);
 void malloc_tsd_boot1(void);
 #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
     !defined(_WIN32))
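
Note: the new O(witnesses, witness_list_t) entry above gives each thread its own list of the locks it currently holds. jemalloc's MALLOC_TSD machinery generates typed accessors for every O() entry; the accessor names below are assumed from that convention rather than shown in this diff, so treat this as an illustrative sketch only:

/* Sketch: accessors assumed to be generated for O(witnesses, ...). */
witness_list_t tsd_witnesses_get(tsd_t *tsd);     /* by-value read */
witness_list_t *tsd_witnessesp_get(tsd_t *tsd);   /* pointer into the tsd */
void tsd_witnesses_set(tsd_t *tsd, witness_list_t val);

The witness code walks this list on each lock acquisition, which is why so many signatures in this commit grow a tsd_t *tsd parameter.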
include/jemalloc/internal/valgrind.h
@ -30,15 +30,17 @@
  * calls must be embedded in macros rather than in functions so that when
  * Valgrind reports errors, there are no extra stack frames in the backtraces.
  */
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
-	if (unlikely(in_valgrind && cond)) \
-		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsd, ptr, usize, zero) do { \
+	if (unlikely(in_valgrind && cond)) { \
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsd, ptr), \
+		    zero); \
+	} \
 } while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsd, ptr, usize, \
     ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
     zero) do { \
 	if (unlikely(in_valgrind)) { \
-		size_t rzsize = p2rz(ptr); \
+		size_t rzsize = p2rz(tsd, ptr); \
 \
 		if (!maybe_moved || ptr == old_ptr) { \
 			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
include/jemalloc/internal/witness.h (new file, 103 lines)
@ -0,0 +1,103 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct witness_s witness_t;
+typedef unsigned witness_rank_t;
+typedef ql_head(witness_t) witness_list_t;
+typedef int witness_comp_t (const witness_t *, const witness_t *);
+
+/*
+ * Lock ranks.  Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
+ * the witness machinery.
+ */
+#define WITNESS_RANK_OMIT 0U
+
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_ARENAS 2U
+
+#define WITNESS_RANK_PROF_DUMP 3U
+#define WITNESS_RANK_PROF_BT2GCTX 4U
+#define WITNESS_RANK_PROF_TDATAS 5U
+#define WITNESS_RANK_PROF_TDATA 6U
+#define WITNESS_RANK_PROF_GCTX 7U
+
+#define WITNESS_RANK_ARENA 8U
+#define WITNESS_RANK_ARENA_CHUNKS 9U
+#define WITNESS_RANK_ARENA_NODE_CACHE 10U
+
+#define WITNESS_RANK_BASE 11U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct witness_s {
+	/* Name, used for printing lock order reversal messages. */
+	const char *name;
+
+	/*
+	 * Witness rank, where 0 is lowest and UINT_MAX is highest.  Witnesses
+	 * must be acquired in order of increasing rank.
+	 */
+	witness_rank_t rank;
+
+	/*
+	 * If two witnesses are of equal rank and they have the same comp
+	 * function pointer, it is called as a last attempt to differentiate
+	 * between witnesses of equal rank.
+	 */
+	witness_comp_t *comp;
+
+	/* Linkage for thread's currently owned locks. */
+	ql_elm(witness_t) link;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+    witness_comp_t *comp);
+#ifdef JEMALLOC_JET
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *witness_lock_error;
+#endif
+void witness_lock(tsd_t *tsd, witness_t *witness);
+void witness_unlock(tsd_t *tsd, witness_t *witness);
+#ifdef JEMALLOC_JET
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *witness_owner_error;
+#endif
+void witness_assert_owner(tsd_t *tsd, const witness_t *witness);
+#ifdef JEMALLOC_JET
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *witness_not_owner_error;
+#endif
+void witness_assert_not_owner(tsd_t *tsd, const witness_t *witness);
+#ifdef JEMALLOC_JET
+typedef void (witness_lockless_error_t)(const witness_list_t *);
+extern witness_lockless_error_t *witness_lockless_error;
+#endif
+void witness_assert_lockless(tsd_t *tsd);
+
+void witnesses_cleanup(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
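
Note: the header above declares the validator's API, but the mutex-side integration lives in files not shown in this view (src/witness.c and the malloc_mutex wrappers). The following is an illustrative sketch of the intended usage, not code from this commit; the example_mutex_* names and struct layout are hypothetical:

#include <pthread.h>

/* Hypothetical lock wrapper that embeds a witness. */
typedef struct {
	pthread_mutex_t lock;
	witness_t witness;
} example_mutex_t;

static bool
example_mutex_init(example_mutex_t *mtx, const char *name, witness_rank_t rank)
{
	if (pthread_mutex_init(&mtx->lock, NULL) != 0)
		return (true);
	/* Rank is fixed at init; a NULL comp means equal ranks conflict. */
	witness_init(&mtx->witness, name, rank, NULL);
	return (false);
}

static void
example_mutex_lock(tsd_t *tsd, example_mutex_t *mtx)
{
	witness_assert_not_owner(tsd, &mtx->witness);
	pthread_mutex_lock(&mtx->lock);
	/*
	 * witness_lock() verifies that every witness already in this
	 * thread's list has a lower rank, then appends this one.
	 */
	witness_lock(tsd, &mtx->witness);
}

static void
example_mutex_unlock(tsd_t *tsd, example_mutex_t *mtx)
{
	witness_unlock(tsd, &mtx->witness);
	pthread_mutex_unlock(&mtx->lock);
}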
src/arena.c (568 lines changed; file diff suppressed because it is too large)
src/base.c (26 lines changed)
@ -76,7 +76,7 @@ base_chunk_alloc(size_t minsize)
  * physical memory usage.
  */
 void *
-base_alloc(size_t size)
+base_alloc(tsd_t *tsd, size_t size)
 {
 	void *ret;
 	size_t csize, usize;
@ -91,7 +91,7 @@ base_alloc(size_t size)
 
 	usize = s2u(csize);
 	extent_node_init(&key, NULL, NULL, usize, false, false);
-	malloc_mutex_lock(&base_mtx);
+	malloc_mutex_lock(tsd, &base_mtx);
 	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
 	if (node != NULL) {
 		/* Use existing space. */
@ -123,28 +123,28 @@ base_alloc(size_t size)
 	}
 	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
-	malloc_mutex_unlock(&base_mtx);
+	malloc_mutex_unlock(tsd, &base_mtx);
 	return (ret);
 }
 
 void
-base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
+base_stats_get(tsd_t *tsd, size_t *allocated, size_t *resident, size_t *mapped)
 {
 
-	malloc_mutex_lock(&base_mtx);
+	malloc_mutex_lock(tsd, &base_mtx);
 	assert(base_allocated <= base_resident);
 	assert(base_resident <= base_mapped);
 	*allocated = base_allocated;
 	*resident = base_resident;
 	*mapped = base_mapped;
-	malloc_mutex_unlock(&base_mtx);
+	malloc_mutex_unlock(tsd, &base_mtx);
 }
 
 bool
 base_boot(void)
 {
 
-	if (malloc_mutex_init(&base_mtx))
+	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
 		return (true);
 	extent_tree_szad_new(&base_avail_szad);
 	base_nodes = NULL;
@ -153,22 +153,22 @@ base_boot(void)
 }
 
 void
-base_prefork(void)
+base_prefork(tsd_t *tsd)
 {
 
-	malloc_mutex_prefork(&base_mtx);
+	malloc_mutex_prefork(tsd, &base_mtx);
 }
 
 void
-base_postfork_parent(void)
+base_postfork_parent(tsd_t *tsd)
 {
 
-	malloc_mutex_postfork_parent(&base_mtx);
+	malloc_mutex_postfork_parent(tsd, &base_mtx);
 }
 
 void
-base_postfork_child(void)
+base_postfork_child(tsd_t *tsd)
 {
 
-	malloc_mutex_postfork_child(&base_mtx);
+	malloc_mutex_postfork_child(tsd, &base_mtx);
 }
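
Note: as the base_boot() hunk above shows, malloc_mutex_init() now registers each mutex with a human-readable name and a witness rank. The payoff is that lock ordering becomes machine-checkable. A hedged sketch of what the validator accepts and rejects; some_leaf_mtx is hypothetical, assumed to be initialized at WITNESS_RANK_LEAF, and the checks only fire in builds with witness validation enabled:

static void
lock_order_demo(tsd_t *tsd)
{
	/* OK: ranks increase (WITNESS_RANK_BASE is 11U, below LEAF). */
	malloc_mutex_lock(tsd, &base_mtx);
	malloc_mutex_lock(tsd, &some_leaf_mtx);
	malloc_mutex_unlock(tsd, &some_leaf_mtx);
	malloc_mutex_unlock(tsd, &base_mtx);

	/* Lock order reversal: acquiring rank 11U while holding LEAF. */
	malloc_mutex_lock(tsd, &some_leaf_mtx);
	malloc_mutex_lock(tsd, &base_mtx);	/* witness_lock() would abort */
	malloc_mutex_unlock(tsd, &base_mtx);
	malloc_mutex_unlock(tsd, &some_leaf_mtx);
}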
src/chunk.c (184 lines changed)
@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
  * definition.
  */
 
-static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed);
+static void chunk_record(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
+    bool committed);
 
 /******************************************************************************/
 
@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
 }
 
 chunk_hooks_t
-chunk_hooks_get(arena_t *arena)
+chunk_hooks_get(tsd_t *tsd, arena_t *arena)
 {
 	chunk_hooks_t chunk_hooks;
 
-	malloc_mutex_lock(&arena->chunks_mtx);
+	malloc_mutex_lock(tsd, &arena->chunks_mtx);
 	chunk_hooks = chunk_hooks_get_locked(arena);
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
 
 	return (chunk_hooks);
 }
 
 chunk_hooks_t
-chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(tsd_t *tsd, arena_t *arena, const chunk_hooks_t *chunk_hooks)
 {
 	chunk_hooks_t old_chunk_hooks;
 
-	malloc_mutex_lock(&arena->chunks_mtx);
+	malloc_mutex_lock(tsd, &arena->chunks_mtx);
 	old_chunk_hooks = arena->chunk_hooks;
 	/*
 	 * Copy each field atomically so that it is impossible for readers to
@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
 	ATOMIC_COPY_HOOK(split);
 	ATOMIC_COPY_HOOK(merge);
 #undef ATOMIC_COPY_HOOK
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
 
 	return (old_chunk_hooks);
 }
 
 static void
-chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
-    bool locked)
+chunk_hooks_assure_initialized_impl(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, bool locked)
 {
 	static const chunk_hooks_t uninitialized_hooks =
 	    CHUNK_HOOKS_INITIALIZER;
@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
 	    0) {
 		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
-		    chunk_hooks_get(arena);
+		    chunk_hooks_get(tsd, arena);
 	}
 }
 
 static void
-chunk_hooks_assure_initialized_locked(arena_t *arena,
+chunk_hooks_assure_initialized_locked(tsd_t *tsd, arena_t *arena,
     chunk_hooks_t *chunk_hooks)
 {
 
-	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+	chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, true);
 }
 
 static void
-chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+chunk_hooks_assure_initialized(tsd_t *tsd, arena_t *arena,
+    chunk_hooks_t *chunk_hooks)
 {
 
-	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+	chunk_hooks_assure_initialized_impl(tsd, arena, chunk_hooks, false);
 }
 
 bool
-chunk_register(const void *chunk, const extent_node_t *node)
+chunk_register(tsd_t *tsd, const void *chunk, const extent_node_t *node)
 {
 
 	assert(extent_node_addr_get(node) == chunk);
@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
 			high = atomic_read_z(&highchunks);
 		}
 		if (cur > high && prof_gdump_get_unlocked())
-			prof_gdump();
+			prof_gdump(tsd);
 	}
 
 	return (false);
@ -197,7 +199,7 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
 }
 
 static void *
-chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_recycle(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
     void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
     bool dalloc_node)
@ -219,8 +221,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
-	malloc_mutex_lock(&arena->chunks_mtx);
-	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+	malloc_mutex_lock(tsd, &arena->chunks_mtx);
+	chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
 	if (new_addr != NULL) {
 		extent_node_t key;
 		extent_node_init(&key, arena, new_addr, alloc_size, false,
@ -232,7 +234,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	}
 	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
 	    size)) {
-		malloc_mutex_unlock(&arena->chunks_mtx);
+		malloc_mutex_unlock(tsd, &arena->chunks_mtx);
 		return (NULL);
 	}
 	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@ -251,7 +253,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (leadsize != 0 &&
 	    chunk_hooks->split(extent_node_addr_get(node),
 	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
-		malloc_mutex_unlock(&arena->chunks_mtx);
+		malloc_mutex_unlock(tsd, &arena->chunks_mtx);
 		return (NULL);
 	}
 	/* Remove node from the tree. */
@ -271,20 +273,21 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		if (chunk_hooks->split(ret, size + trailsize, size,
 		    trailsize, false, arena->ind)) {
 			if (dalloc_node && node != NULL)
-				arena_node_dalloc(arena, node);
-			malloc_mutex_unlock(&arena->chunks_mtx);
-			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
-			    cache, ret, size + trailsize, zeroed, committed);
+				arena_node_dalloc(tsd, arena, node);
+			malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+			chunk_record(tsd, arena, chunk_hooks, chunks_szad,
+			    chunks_ad, cache, ret, size + trailsize, zeroed,
+			    committed);
 			return (NULL);
 		}
 		/* Insert the trailing space as a smaller chunk. */
 		if (node == NULL) {
-			node = arena_node_alloc(arena);
+			node = arena_node_alloc(tsd, arena);
 			if (node == NULL) {
-				malloc_mutex_unlock(&arena->chunks_mtx);
-				chunk_record(arena, chunk_hooks, chunks_szad,
-				    chunks_ad, cache, ret, size + trailsize,
-				    zeroed, committed);
+				malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+				chunk_record(tsd, arena, chunk_hooks,
+				    chunks_szad, chunks_ad, cache, ret, size +
+				    trailsize, zeroed, committed);
 				return (NULL);
 			}
 		}
@ -296,16 +299,16 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		node = NULL;
 	}
 	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
-		malloc_mutex_unlock(&arena->chunks_mtx);
-		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
-		    ret, size, zeroed, committed);
+		malloc_mutex_unlock(tsd, &arena->chunks_mtx);
+		chunk_record(tsd, arena, chunk_hooks, chunks_szad, chunks_ad,
+		    cache, ret, size, zeroed, committed);
 		return (NULL);
 	}
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
 
 	assert(dalloc_node || node != NULL);
 	if (dalloc_node && node != NULL)
-		arena_node_dalloc(arena, node);
+		arena_node_dalloc(tsd, arena, node);
 	if (*zero) {
 		if (!zeroed)
 			memset(ret, 0, size);
@ -328,8 +331,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
  * them if they are returned.
  */
 static void *
-chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit, dss_prec_t dss_prec)
+chunk_alloc_core(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
 {
 	void *ret;
 
@ -340,8 +343,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 
 	/* "primary" dss. */
 	if (have_dss && dss_prec == dss_prec_primary && (ret =
-	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
-	    NULL)
+	    chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
+	    commit)) != NULL)
 		return (ret);
 	/* mmap. */
 	if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
@ -349,8 +352,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
-	    chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
-	    NULL)
+	    chunk_alloc_dss(tsd, arena, new_addr, size, alignment, zero,
+	    commit)) != NULL)
 		return (ret);
 
 	/* All strategies for allocation failed. */
@ -380,8 +383,8 @@ chunk_alloc_base(size_t size)
 }
 
 void *
-chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool dalloc_node)
+chunk_alloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
 {
 	void *ret;
 	bool commit;
@ -392,7 +395,7 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
 	assert((alignment & chunksize_mask) == 0);
 
 	commit = true;
-	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+	ret = chunk_recycle(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
 	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
 	    &commit, dalloc_node);
 	if (ret == NULL)
@ -404,11 +407,11 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
 }
 
 static arena_t *
-chunk_arena_get(unsigned arena_ind)
+chunk_arena_get(tsd_t *tsd, unsigned arena_ind)
 {
 	arena_t *arena;
 
-	arena = arena_get(arena_ind, false);
+	arena = arena_get(tsd, arena_ind, false);
 	/*
 	 * The arena we're allocating on behalf of must have been initialized
 	 * already.
@ -422,11 +425,13 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
     bool *commit, unsigned arena_ind)
 {
 	void *ret;
+	tsd_t *tsd;
 	arena_t *arena;
 
-	arena = chunk_arena_get(arena_ind);
-	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero, commit,
-	    arena->dss_prec);
+	tsd = tsd_fetch();
+	arena = chunk_arena_get(tsd, arena_ind);
+	ret = chunk_alloc_core(tsd, arena, new_addr, size, alignment, zero,
+	    commit, arena->dss_prec);
 	if (ret == NULL)
 		return (NULL);
 	if (config_valgrind)
@ -436,8 +441,8 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 }
 
 static void *
-chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_retained(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
 {
 
 	assert(size != 0);
@ -445,20 +450,20 @@ chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);
 
-	return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
-	    &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
-	    commit, true));
+	return (chunk_recycle(tsd, arena, chunk_hooks,
+	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+	    new_addr, size, alignment, zero, commit, true));
 }
 
 void *
-chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
-    size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
 {
 	void *ret;
 
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
 
-	ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
+	ret = chunk_alloc_retained(tsd, arena, chunk_hooks, new_addr, size,
 	    alignment, zero, commit);
 	if (ret == NULL) {
 		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
@ -473,7 +478,7 @@ chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
 }
 
 static void
-chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_record(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
     void *chunk, size_t size, bool zeroed, bool committed)
 {
@ -485,8 +490,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 	unzeroed = cache || !zeroed;
 	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
-	malloc_mutex_lock(&arena->chunks_mtx);
-	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+	malloc_mutex_lock(tsd, &arena->chunks_mtx);
+	chunk_hooks_assure_initialized_locked(tsd, arena, chunk_hooks);
 	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
 	    false, false);
 	node = extent_tree_ad_nsearch(chunks_ad, &key);
@ -511,7 +516,7 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 	} else {
 		/* Coalescing forward failed, so insert a new node. */
-		node = arena_node_alloc(arena);
+		node = arena_node_alloc(tsd, arena);
 		if (node == NULL) {
 			/*
 			 * Node allocation failed, which is an exceedingly
@ -520,8 +525,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 			 * a virtual memory leak.
 			 */
 			if (cache) {
-				chunk_purge_wrapper(arena, chunk_hooks, chunk,
-				    size, 0, size);
+				chunk_purge_wrapper(tsd, arena, chunk_hooks,
+				    chunk, size, 0, size);
 			}
 			goto label_return;
 		}
@ -557,16 +562,16 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
 		extent_tree_szad_insert(chunks_szad, node);
 		arena_chunk_cache_maybe_insert(arena, node, cache);
 
-		arena_node_dalloc(arena, prev);
+		arena_node_dalloc(tsd, arena, prev);
 	}
 
label_return:
-	malloc_mutex_unlock(&arena->chunks_mtx);
+	malloc_mutex_unlock(tsd, &arena->chunks_mtx);
 }
 
 void
-chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, bool committed)
+chunk_dalloc_cache(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool committed)
 {
 
 	assert(chunk != NULL);
@ -574,9 +579,9 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+	chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_cached,
 	    &arena->chunks_ad_cached, true, chunk, size, false, committed);
-	arena_maybe_purge(arena);
+	arena_maybe_purge(tsd, arena);
 }
 
 static bool
@ -584,14 +589,14 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
     unsigned arena_ind)
 {
 
-	if (!have_dss || !chunk_in_dss(chunk))
+	if (!have_dss || !chunk_in_dss(tsd_fetch(), chunk))
 		return (chunk_dalloc_mmap(chunk, size));
 	return (true);
 }
 
 void
-chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, bool zeroed, bool committed)
+chunk_dalloc_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, bool zeroed, bool committed)
 {
 
 	assert(chunk != NULL);
@ -599,7 +604,7 @@ chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
 	/* Try to deallocate. */
 	if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
 		return;
@ -610,7 +615,7 @@ chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
 	}
 	zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
 	    arena->ind);
-	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+	chunk_record(tsd, arena, chunk_hooks, &arena->chunks_szad_retained,
 	    &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
 }
 
@ -648,11 +653,11 @@ chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
 }
 
 bool
-chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
-    size_t size, size_t offset, size_t length)
+chunk_purge_wrapper(tsd_t *tsd, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    void *chunk, size_t size, size_t offset, size_t length)
 {
 
-	chunk_hooks_assure_initialized(arena, chunk_hooks);
+	chunk_hooks_assure_initialized(tsd, arena, chunk_hooks);
 	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
 }
 
@ -673,8 +678,11 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
 
 	if (!maps_coalesce)
 		return (true);
-	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
-		return (true);
+	if (have_dss) {
+		tsd_t *tsd = tsd_fetch();
+		if (chunk_in_dss(tsd, chunk_a) != chunk_in_dss(tsd, chunk_b))
+			return (true);
+	}
 
 	return (false);
 }
@ -683,7 +691,7 @@ static rtree_node_elm_t *
 chunks_rtree_node_alloc(size_t nelms)
 {
 
-	return ((rtree_node_elm_t *)base_alloc(nelms *
+	return ((rtree_node_elm_t *)base_alloc(tsd_fetch(), nelms *
 	    sizeof(rtree_node_elm_t)));
 }
 
@ -730,22 +738,22 @@ chunk_boot(void)
 }
 
 void
-chunk_prefork(void)
+chunk_prefork(tsd_t *tsd)
 {
 
-	chunk_dss_prefork();
+	chunk_dss_prefork(tsd);
 }
 
 void
-chunk_postfork_parent(void)
+chunk_postfork_parent(tsd_t *tsd)
 {
 
-	chunk_dss_postfork_parent();
+	chunk_dss_postfork_parent(tsd);
 }
 
 void
-chunk_postfork_child(void)
+chunk_postfork_child(tsd_t *tsd)
 {
 
-	chunk_dss_postfork_child();
+	chunk_dss_postfork_child(tsd);
 }
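
Note: two functions above, chunk_dalloc_default() and chunk_merge_default(), keep their public chunk_hooks_t signatures, so the caller cannot pass a tsd_t through them; each fetches it internally with tsd_fetch(). Reduced to a sketch (example_dalloc_hook is a hypothetical stand-in for such a hook):

static bool
example_dalloc_hook(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{
	tsd_t *tsd = tsd_fetch();	/* look up this thread's tsd */

	if (!have_dss || !chunk_in_dss(tsd, chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}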
src/chunk_dss.c
@ -41,33 +41,33 @@ chunk_dss_sbrk(intptr_t increment)
 }
 
 dss_prec_t
-chunk_dss_prec_get(void)
+chunk_dss_prec_get(tsd_t *tsd)
 {
 	dss_prec_t ret;
 
 	if (!have_dss)
 		return (dss_prec_disabled);
-	malloc_mutex_lock(&dss_mtx);
+	malloc_mutex_lock(tsd, &dss_mtx);
 	ret = dss_prec_default;
-	malloc_mutex_unlock(&dss_mtx);
+	malloc_mutex_unlock(tsd, &dss_mtx);
 	return (ret);
 }
 
 bool
-chunk_dss_prec_set(dss_prec_t dss_prec)
+chunk_dss_prec_set(tsd_t *tsd, dss_prec_t dss_prec)
 {
 
 	if (!have_dss)
 		return (dss_prec != dss_prec_disabled);
-	malloc_mutex_lock(&dss_mtx);
+	malloc_mutex_lock(tsd, &dss_mtx);
 	dss_prec_default = dss_prec;
-	malloc_mutex_unlock(&dss_mtx);
+	malloc_mutex_unlock(tsd, &dss_mtx);
 	return (false);
 }
 
 void *
-chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool *commit)
+chunk_alloc_dss(tsd_t *tsd, arena_t *arena, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit)
 {
 	cassert(have_dss);
 	assert(size > 0 && (size & chunksize_mask) == 0);
@ -80,7 +80,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 	if ((intptr_t)size < 0)
 		return (NULL);
 
-	malloc_mutex_lock(&dss_mtx);
+	malloc_mutex_lock(tsd, &dss_mtx);
 	if (dss_prev != (void *)-1) {
 
 		/*
@ -122,7 +122,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 			if ((uintptr_t)ret < (uintptr_t)dss_max ||
 			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
 				/* Wrap-around. */
-				malloc_mutex_unlock(&dss_mtx);
+				malloc_mutex_unlock(tsd, &dss_mtx);
 				return (NULL);
 			}
 			incr = gap_size + cpad_size + size;
@ -130,11 +130,11 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 			if (dss_prev == dss_max) {
 				/* Success. */
 				dss_max = dss_next;
-				malloc_mutex_unlock(&dss_mtx);
+				malloc_mutex_unlock(tsd, &dss_mtx);
 				if (cpad_size != 0) {
 					chunk_hooks_t chunk_hooks =
 					    CHUNK_HOOKS_INITIALIZER;
-					chunk_dalloc_wrapper(arena,
+					chunk_dalloc_wrapper(tsd, arena,
 					    &chunk_hooks, cpad, cpad_size,
 					    false, true);
 				}
@ -149,25 +149,25 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 			}
 		} while (dss_prev != (void *)-1);
 	}
-	malloc_mutex_unlock(&dss_mtx);
+	malloc_mutex_unlock(tsd, &dss_mtx);
 
 	return (NULL);
 }
 
 bool
-chunk_in_dss(void *chunk)
+chunk_in_dss(tsd_t *tsd, void *chunk)
 {
 	bool ret;
 
 	cassert(have_dss);
 
-	malloc_mutex_lock(&dss_mtx);
+	malloc_mutex_lock(tsd, &dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max)
 		ret = true;
 	else
 		ret = false;
-	malloc_mutex_unlock(&dss_mtx);
+	malloc_mutex_unlock(tsd, &dss_mtx);
 
 	return (ret);
 }
@ -178,7 +178,7 @@ chunk_dss_boot(void)
 
 	cassert(have_dss);
 
-	if (malloc_mutex_init(&dss_mtx))
+	if (malloc_mutex_init(&dss_mtx, "dss", WITNESS_RANK_DSS))
 		return (true);
 	dss_base = chunk_dss_sbrk(0);
 	dss_prev = dss_base;
@ -188,27 +188,27 @@ chunk_dss_boot(void)
 }
 
 void
-chunk_dss_prefork(void)
+chunk_dss_prefork(tsd_t *tsd)
 {
 
 	if (have_dss)
-		malloc_mutex_prefork(&dss_mtx);
+		malloc_mutex_prefork(tsd, &dss_mtx);
 }
 
 void
-chunk_dss_postfork_parent(void)
+chunk_dss_postfork_parent(tsd_t *tsd)
 {
 
 	if (have_dss)
-		malloc_mutex_postfork_parent(&dss_mtx);
+		malloc_mutex_postfork_parent(tsd, &dss_mtx);
 }
 
 void
-chunk_dss_postfork_child(void)
+chunk_dss_postfork_child(tsd_t *tsd)
 {
 
 	if (have_dss)
-		malloc_mutex_postfork_child(&dss_mtx);
+		malloc_mutex_postfork_child(tsd, &dss_mtx);
 }
 
 /******************************************************************************/
|
106
src/huge.c
106
src/huge.c
@ -15,12 +15,12 @@ huge_node_get(const void *ptr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static bool
|
static bool
|
||||||
huge_node_set(const void *ptr, extent_node_t *node)
|
huge_node_set(tsd_t *tsd, const void *ptr, extent_node_t *node)
|
||||||
{
|
{
|
||||||
|
|
||||||
assert(extent_node_addr_get(node) == ptr);
|
assert(extent_node_addr_get(node) == ptr);
|
||||||
assert(!extent_node_achunk_get(node));
|
assert(!extent_node_achunk_get(node));
|
||||||
return (chunk_register(ptr, node));
|
return (chunk_register(tsd, ptr, node));
|
||||||
}
|
}
|
||||||
|
|
||||||
static void
|
static void
|
||||||
@ -68,7 +68,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
|
|||||||
*/
|
*/
|
||||||
is_zeroed = zero;
|
is_zeroed = zero;
|
||||||
arena = arena_choose(tsd, arena);
|
arena = arena_choose(tsd, arena);
|
||||||
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
|
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsd, arena,
|
||||||
usize, alignment, &is_zeroed)) == NULL) {
|
usize, alignment, &is_zeroed)) == NULL) {
|
||||||
idalloctm(tsd, node, tcache, true, true);
|
idalloctm(tsd, node, tcache, true, true);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
@ -76,17 +76,17 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
|
|||||||
|
|
||||||
extent_node_init(node, arena, ret, usize, is_zeroed, true);
|
extent_node_init(node, arena, ret, usize, is_zeroed, true);
|
||||||
|
|
||||||
if (huge_node_set(ret, node)) {
|
if (huge_node_set(tsd, ret, node)) {
|
||||||
arena_chunk_dalloc_huge(arena, ret, usize);
|
arena_chunk_dalloc_huge(tsd, arena, ret, usize);
|
||||||
idalloctm(tsd, node, tcache, true, true);
|
idalloctm(tsd, node, tcache, true, true);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Insert node into huge. */
|
/* Insert node into huge. */
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
ql_elm_new(node, ql_link);
|
ql_elm_new(node, ql_link);
|
||||||
ql_tail_insert(&arena->huge, node, ql_link);
|
ql_tail_insert(&arena->huge, node, ql_link);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
if (zero || (config_fill && unlikely(opt_zero))) {
|
if (zero || (config_fill && unlikely(opt_zero))) {
|
||||||
if (!is_zeroed)
|
if (!is_zeroed)
|
||||||
@ -103,7 +103,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
|
|||||||
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
|
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
|
||||||
#endif
|
#endif
|
||||||
static void
|
static void
|
||||||
huge_dalloc_junk(void *ptr, size_t usize)
|
huge_dalloc_junk(tsd_t *tsd, void *ptr, size_t usize)
|
||||||
{
|
{
|
||||||
|
|
||||||
if (config_fill && have_dss && unlikely(opt_junk_free)) {
|
if (config_fill && have_dss && unlikely(opt_junk_free)) {
|
||||||
@ -111,7 +111,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
|
|||||||
* Only bother junk filling if the chunk isn't about to be
|
* Only bother junk filling if the chunk isn't about to be
|
||||||
* unmapped.
|
* unmapped.
|
||||||
*/
|
*/
|
||||||
if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
|
if (!config_munmap || (have_dss && chunk_in_dss(tsd, ptr)))
|
||||||
memset(ptr, JEMALLOC_FREE_JUNK, usize);
|
memset(ptr, JEMALLOC_FREE_JUNK, usize);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -122,8 +122,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
static void
|
static void
|
||||||
huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
|
huge_ralloc_no_move_similar(tsd_t *tsd, void *ptr, size_t oldsize,
|
||||||
size_t usize_max, bool zero)
|
size_t usize_min, size_t usize_max, bool zero)
|
||||||
{
|
{
|
||||||
size_t usize, usize_next;
|
size_t usize, usize_next;
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
@ -151,21 +151,22 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
|
|||||||
JEMALLOC_FREE_JUNK, sdiff);
|
JEMALLOC_FREE_JUNK, sdiff);
|
||||||
post_zeroed = false;
|
post_zeroed = false;
|
||||||
} else {
|
} else {
|
||||||
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
|
post_zeroed = !chunk_purge_wrapper(tsd, arena,
|
||||||
ptr, CHUNK_CEILING(oldsize), usize, sdiff);
|
&chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
|
||||||
|
sdiff);
|
||||||
}
|
}
|
||||||
} else
|
} else
|
||||||
post_zeroed = pre_zeroed;
|
post_zeroed = pre_zeroed;
|
||||||
|
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
/* Update the size of the huge allocation. */
|
/* Update the size of the huge allocation. */
|
||||||
assert(extent_node_size_get(node) != usize);
|
assert(extent_node_size_get(node) != usize);
|
||||||
extent_node_size_set(node, usize);
|
extent_node_size_set(node, usize);
|
||||||
/* Update zeroed. */
|
/* Update zeroed. */
|
||||||
extent_node_zeroed_set(node, post_zeroed);
|
extent_node_zeroed_set(node, post_zeroed);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
|
arena_chunk_ralloc_huge_similar(tsd, arena, ptr, oldsize, usize);
|
||||||
|
|
||||||
/* Fill if necessary (growing). */
|
/* Fill if necessary (growing). */
|
||||||
if (oldsize < usize) {
|
if (oldsize < usize) {
|
||||||
@ -182,7 +183,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
|
|||||||
}
|
}
|
||||||
|
|
||||||
static bool
|
static bool
|
||||||
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
huge_ralloc_no_move_shrink(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize)
|
||||||
{
|
{
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
arena_t *arena;
|
arena_t *arena;
|
||||||
@ -193,7 +194,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
|||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
pre_zeroed = extent_node_zeroed_get(node);
|
pre_zeroed = extent_node_zeroed_get(node);
|
||||||
chunk_hooks = chunk_hooks_get(arena);
|
chunk_hooks = chunk_hooks_get(tsd, arena);
|
||||||
|
|
||||||
assert(oldsize > usize);
|
assert(oldsize > usize);
|
||||||
|
|
||||||
@ -206,42 +207,43 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
|||||||
if (oldsize > usize) {
|
if (oldsize > usize) {
|
||||||
size_t sdiff = oldsize - usize;
|
size_t sdiff = oldsize - usize;
|
||||||
if (config_fill && unlikely(opt_junk_free)) {
|
if (config_fill && unlikely(opt_junk_free)) {
|
||||||
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
|
huge_dalloc_junk(tsd, (void *)((uintptr_t)ptr + usize),
|
||||||
sdiff);
|
sdiff);
|
||||||
post_zeroed = false;
|
post_zeroed = false;
|
||||||
} else {
|
} else {
|
||||||
post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
|
post_zeroed = !chunk_purge_wrapper(tsd, arena,
|
||||||
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
|
&chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
|
||||||
CHUNK_CEILING(oldsize),
|
usize), CHUNK_CEILING(oldsize),
|
||||||
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
|
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
|
||||||
}
|
}
|
||||||
} else
|
} else
|
||||||
post_zeroed = pre_zeroed;
|
post_zeroed = pre_zeroed;
|
||||||
|
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
/* Update the size of the huge allocation. */
|
/* Update the size of the huge allocation. */
|
||||||
extent_node_size_set(node, usize);
|
extent_node_size_set(node, usize);
|
||||||
/* Update zeroed. */
|
/* Update zeroed. */
|
||||||
extent_node_zeroed_set(node, post_zeroed);
|
extent_node_zeroed_set(node, post_zeroed);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
/* Zap the excess chunks. */
|
/* Zap the excess chunks. */
|
||||||
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
|
arena_chunk_ralloc_huge_shrink(tsd, arena, ptr, oldsize, usize);
|
||||||
|
|
||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool
|
static bool
|
||||||
huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
|
huge_ralloc_no_move_expand(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize,
|
||||||
|
bool zero) {
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
arena_t *arena;
|
arena_t *arena;
|
||||||
bool is_zeroed_subchunk, is_zeroed_chunk;
|
bool is_zeroed_subchunk, is_zeroed_chunk;
|
||||||
|
|
||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
is_zeroed_subchunk = extent_node_zeroed_get(node);
|
is_zeroed_subchunk = extent_node_zeroed_get(node);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
|
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
|
||||||
@ -249,14 +251,14 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
|
|||||||
*/
|
*/
|
||||||
is_zeroed_chunk = zero;
|
is_zeroed_chunk = zero;
|
||||||
|
|
||||||
if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
|
if (arena_chunk_ralloc_huge_expand(tsd, arena, ptr, oldsize, usize,
|
||||||
&is_zeroed_chunk))
|
&is_zeroed_chunk))
|
||||||
return (true);
|
return (true);
|
||||||
|
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
/* Update the size of the huge allocation. */
|
/* Update the size of the huge allocation. */
|
||||||
extent_node_size_set(node, usize);
|
extent_node_size_set(node, usize);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
if (zero || (config_fill && unlikely(opt_zero))) {
|
if (zero || (config_fill && unlikely(opt_zero))) {
|
||||||
if (!is_zeroed_subchunk) {
|
if (!is_zeroed_subchunk) {
|
||||||
@ -291,15 +293,15 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
|
|||||||
|
|
||||||
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
|
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
|
||||||
/* Attempt to expand the allocation in-place. */
|
/* Attempt to expand the allocation in-place. */
|
||||||
if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
|
if (!huge_ralloc_no_move_expand(tsd, ptr, oldsize, usize_max,
|
||||||
zero)) {
|
zero)) {
|
||||||
arena_decay_tick(tsd, huge_aalloc(ptr));
|
arena_decay_tick(tsd, huge_aalloc(ptr));
|
||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
/* Try again, this time with usize_min. */
|
/* Try again, this time with usize_min. */
|
||||||
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
|
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
|
||||||
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
|
CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsd,
|
||||||
oldsize, usize_min, zero)) {
|
ptr, oldsize, usize_min, zero)) {
|
||||||
arena_decay_tick(tsd, huge_aalloc(ptr));
|
arena_decay_tick(tsd, huge_aalloc(ptr));
|
||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
@ -311,15 +313,15 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
|
|||||||
*/
|
*/
|
||||||
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
|
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
|
||||||
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
|
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
|
||||||
huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
|
huge_ralloc_no_move_similar(tsd, ptr, oldsize, usize_min,
|
||||||
zero);
|
usize_max, zero);
|
||||||
arena_decay_tick(tsd, huge_aalloc(ptr));
|
arena_decay_tick(tsd, huge_aalloc(ptr));
|
||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Attempt to shrink the allocation in-place. */
|
/* Attempt to shrink the allocation in-place. */
|
||||||
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
|
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
|
||||||
if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
|
if (!huge_ralloc_no_move_shrink(tsd, ptr, oldsize, usize_max)) {
|
||||||
arena_decay_tick(tsd, huge_aalloc(ptr));
|
arena_decay_tick(tsd, huge_aalloc(ptr));
|
||||||
return (false);
|
return (false);
|
||||||
}
|
}
|
||||||
@ -376,13 +378,13 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
|
|||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
huge_node_unset(ptr, node);
|
huge_node_unset(ptr, node);
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
ql_remove(&arena->huge, node, ql_link);
|
ql_remove(&arena->huge, node, ql_link);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
huge_dalloc_junk(extent_node_addr_get(node),
|
huge_dalloc_junk(tsd, extent_node_addr_get(node),
|
||||||
extent_node_size_get(node));
|
extent_node_size_get(node));
|
||||||
arena_chunk_dalloc_huge(extent_node_arena_get(node),
|
arena_chunk_dalloc_huge(tsd, extent_node_arena_get(node),
|
||||||
extent_node_addr_get(node), extent_node_size_get(node));
|
extent_node_addr_get(node), extent_node_size_get(node));
|
||||||
idalloctm(tsd, node, tcache, true, true);
|
idalloctm(tsd, node, tcache, true, true);
|
||||||
|
|
||||||
@ -397,7 +399,7 @@ huge_aalloc(const void *ptr)
|
|||||||
}
|
}
|
||||||
|
|
||||||
size_t
|
size_t
|
||||||
huge_salloc(const void *ptr)
|
huge_salloc(tsd_t *tsd, const void *ptr)
|
||||||
{
|
{
|
||||||
size_t size;
|
size_t size;
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
@ -405,15 +407,15 @@ huge_salloc(const void *ptr)
|
|||||||
|
|
||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
size = extent_node_size_get(node);
|
size = extent_node_size_get(node);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
return (size);
|
return (size);
|
||||||
}
|
}
|
||||||
|
|
||||||
prof_tctx_t *
|
prof_tctx_t *
|
||||||
huge_prof_tctx_get(const void *ptr)
|
huge_prof_tctx_get(tsd_t *tsd, const void *ptr)
|
||||||
{
|
{
|
||||||
prof_tctx_t *tctx;
|
prof_tctx_t *tctx;
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
@ -421,29 +423,29 @@ huge_prof_tctx_get(const void *ptr)
|
|||||||
|
|
||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
tctx = extent_node_prof_tctx_get(node);
|
tctx = extent_node_prof_tctx_get(node);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
|
|
||||||
return (tctx);
|
return (tctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
|
huge_prof_tctx_set(tsd_t *tsd, const void *ptr, prof_tctx_t *tctx)
|
||||||
{
|
{
|
||||||
extent_node_t *node;
|
extent_node_t *node;
|
||||||
arena_t *arena;
|
arena_t *arena;
|
||||||
|
|
||||||
node = huge_node_get(ptr);
|
node = huge_node_get(ptr);
|
||||||
arena = extent_node_arena_get(node);
|
arena = extent_node_arena_get(node);
|
||||||
malloc_mutex_lock(&arena->huge_mtx);
|
malloc_mutex_lock(tsd, &arena->huge_mtx);
|
||||||
extent_node_prof_tctx_set(node, tctx);
|
extent_node_prof_tctx_set(node, tctx);
|
||||||
malloc_mutex_unlock(&arena->huge_mtx);
|
malloc_mutex_unlock(tsd, &arena->huge_mtx);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
void
|
||||||
huge_prof_tctx_reset(const void *ptr)
|
huge_prof_tctx_reset(tsd_t *tsd, const void *ptr)
|
||||||
{
|
{
|
||||||
|
|
||||||
huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
|
huge_prof_tctx_set(tsd, ptr, (prof_tctx_t *)(uintptr_t)1U);
|
||||||
}
|
}
|
||||||
|
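The src/huge.c changes above are mechanical but pervasive: every function that can take arena->huge_mtx gains a leading tsd_t * parameter and forwards it to malloc_mutex_lock()/malloc_mutex_unlock(), so the per-thread witness list introduced later in this commit can track which locks the calling thread holds. Assembled from the hunks above, huge_salloc() reads as follows after the change (local declarations reconstructed for completeness; this is a reading aid, not additional diff content):

size_t
huge_salloc(tsd_t *tsd, const void *ptr)
{
	size_t size;
	extent_node_t *node;
	arena_t *arena;

	node = huge_node_get(ptr);
	arena = extent_node_arena_get(node);
	/* tsd travels with the lock so the witness can be checked. */
	malloc_mutex_lock(tsd, &arena->huge_mtx);
	size = extent_node_size_get(node);
	malloc_mutex_unlock(tsd, &arena->huge_mtx);

	return (size);
}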
src/jemalloc.c (385 changed lines): diff suppressed because it is too large.
src/mutex.c (21 changed lines):

@@ -69,7 +69,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
 #endif
 
 bool
-malloc_mutex_init(malloc_mutex_t *mutex)
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
 {
 
 #ifdef _WIN32
@@ -103,31 +103,34 @@ malloc_mutex_init(malloc_mutex_t *mutex)
 	}
 	pthread_mutexattr_destroy(&attr);
 #endif
+	if (config_debug)
+		witness_init(&mutex->witness, name, rank, NULL);
 	return (false);
 }
 
 void
-malloc_mutex_prefork(malloc_mutex_t *mutex)
+malloc_mutex_prefork(tsd_t *tsd, malloc_mutex_t *mutex)
 {
 
-	malloc_mutex_lock(mutex);
+	malloc_mutex_lock(tsd, mutex);
 }
 
 void
-malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+malloc_mutex_postfork_parent(tsd_t *tsd, malloc_mutex_t *mutex)
 {
 
-	malloc_mutex_unlock(mutex);
+	malloc_mutex_unlock(tsd, mutex);
 }
 
 void
-malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+malloc_mutex_postfork_child(tsd_t *tsd, malloc_mutex_t *mutex)
 {
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
-	malloc_mutex_unlock(mutex);
+	malloc_mutex_unlock(tsd, mutex);
 #else
-	if (malloc_mutex_init(mutex)) {
+	if (malloc_mutex_init(mutex, mutex->witness.name,
+	    mutex->witness.rank)) {
 		malloc_printf("<jemalloc>: Error re-initializing mutex in "
 		    "child\n");
 		if (opt_abort)
@@ -137,7 +140,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
 	}
 }
 
 bool
-mutex_boot(void)
+malloc_mutex_boot(void)
 {
 
 #ifdef JEMALLOC_MUTEX_INIT_CB
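Each mutex now carries a named, ranked witness, initialized only in debug builds. The inline malloc_mutex_lock()/malloc_mutex_unlock() wrappers themselves live in a header that is not part of this diff view; a plausible sketch of their shape, inferred from the witness_init() call above and the witness API added in src/witness.c below (the platform lock call is simplified and the field layout is assumed):

/* Hypothetical sketch, not part of this diff: pthread_mutex_lock()
 * stands in for the platform-specific lock operation. */
JEMALLOC_INLINE void
malloc_mutex_lock(tsd_t *tsd, malloc_mutex_t *mutex)
{

	if (isthreaded)
		pthread_mutex_lock(&mutex->lock);
	if (config_debug)
		witness_lock(tsd, &mutex->witness);
}

JEMALLOC_INLINE void
malloc_mutex_unlock(tsd_t *tsd, malloc_mutex_t *mutex)
{

	if (config_debug)
		witness_unlock(tsd, &mutex->witness);
	if (isthreaded)
		pthread_mutex_unlock(&mutex->lock);
}

In non-debug builds the witness calls compile away, so the tsd parameter costs nothing beyond argument passing.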
src/prof.c (497 changed lines): diff suppressed because it is too large.
src/quarantine.c:

@@ -99,7 +99,7 @@ static void
 quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
 {
 	quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
-	assert(obj->usize == isalloc(obj->ptr, config_prof));
+	assert(obj->usize == isalloc(tsd, obj->ptr, config_prof));
 	idalloctm(tsd, obj->ptr, NULL, false, true);
 	quarantine->curbytes -= obj->usize;
 	quarantine->curobjs--;
@@ -119,7 +119,7 @@ void
 quarantine(tsd_t *tsd, void *ptr)
 {
 	quarantine_t *quarantine;
-	size_t usize = isalloc(ptr, config_prof);
+	size_t usize = isalloc(tsd, ptr, config_prof);
 
 	cassert(config_fill);
 	assert(opt_quarantine);
src/tcache.c (91 changed lines):

@@ -24,10 +24,10 @@ static tcaches_t *tcaches_avail;
 /******************************************************************************/
 
 size_t
-tcache_salloc(const void *ptr)
+tcache_salloc(tsd_t *tsd, const void *ptr)
 {
 
-	return (arena_salloc(ptr, false));
+	return (arena_salloc(tsd, ptr, false));
 }
 
 void
@@ -107,12 +107,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		arena_bin_t *bin = &bin_arena->bins[binind];
 
 		if (config_prof && bin_arena == arena) {
-			if (arena_prof_accum(arena, tcache->prof_accumbytes))
-				prof_idump();
+			if (arena_prof_accum(tsd, arena,
+			    tcache->prof_accumbytes))
+				prof_idump(tsd);
 			tcache->prof_accumbytes = 0;
 		}
 
-		malloc_mutex_lock(&bin->lock);
+		malloc_mutex_lock(tsd, &bin->lock);
 		if (config_stats && bin_arena == arena) {
 			assert(!merged_stats);
 			merged_stats = true;
@@ -130,8 +131,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
 				    arena_bitselm_get_mutable(chunk, pageind);
-				arena_dalloc_bin_junked_locked(bin_arena, chunk,
-				    ptr, bitselm);
+				arena_dalloc_bin_junked_locked(tsd, bin_arena,
+				    chunk, ptr, bitselm);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -143,7 +144,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 				ndeferred++;
 			}
 		}
-		malloc_mutex_unlock(&bin->lock);
+		malloc_mutex_unlock(tsd, &bin->lock);
 		arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
@@ -152,11 +153,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		 * arena, so the stats didn't get merged.  Manually do so now.
 		 */
 		arena_bin_t *bin = &arena->bins[binind];
-		malloc_mutex_lock(&bin->lock);
+		malloc_mutex_lock(tsd, &bin->lock);
 		bin->stats.nflushes++;
 		bin->stats.nrequests += tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(&bin->lock);
+		malloc_mutex_unlock(tsd, &bin->lock);
 	}
 
 	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -189,7 +190,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 
 		if (config_prof)
 			idump = false;
-		malloc_mutex_lock(&locked_arena->lock);
+		malloc_mutex_lock(tsd, &locked_arena->lock);
 		if ((config_prof || config_stats) && locked_arena == arena) {
 			if (config_prof) {
 				idump = arena_prof_accum_locked(arena,
@@ -212,8 +213,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 			if (extent_node_arena_get(&chunk->node) ==
 			    locked_arena) {
-				arena_dalloc_large_junked_locked(locked_arena,
-				    chunk, ptr);
+				arena_dalloc_large_junked_locked(tsd,
+				    locked_arena, chunk, ptr);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -225,9 +226,9 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 				ndeferred++;
 			}
 		}
-		malloc_mutex_unlock(&locked_arena->lock);
+		malloc_mutex_unlock(tsd, &locked_arena->lock);
 		if (config_prof && idump)
-			prof_idump();
+			prof_idump(tsd);
 		arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
 	}
 	if (config_stats && !merged_stats) {
@@ -235,12 +236,12 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
 		 */
-		malloc_mutex_lock(&arena->lock);
+		malloc_mutex_lock(tsd, &arena->lock);
 		arena->stats.nrequests_large += tbin->tstats.nrequests;
 		arena->stats.lstats[binind - NBINS].nrequests +=
 		    tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
-		malloc_mutex_unlock(&arena->lock);
+		malloc_mutex_unlock(tsd, &arena->lock);
 	}
 
 	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -251,33 +252,34 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 }
 
 void
-tcache_arena_associate(tcache_t *tcache, arena_t *arena)
+tcache_arena_associate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
 {
 
 	if (config_stats) {
 		/* Link into list of extant tcaches. */
-		malloc_mutex_lock(&arena->lock);
+		malloc_mutex_lock(tsd, &arena->lock);
 		ql_elm_new(tcache, link);
 		ql_tail_insert(&arena->tcache_ql, tcache, link);
-		malloc_mutex_unlock(&arena->lock);
+		malloc_mutex_unlock(tsd, &arena->lock);
 	}
 }
 
 void
-tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
+tcache_arena_reassociate(tsd_t *tsd, tcache_t *tcache, arena_t *oldarena,
+    arena_t *newarena)
 {
 
-	tcache_arena_dissociate(tcache, oldarena);
-	tcache_arena_associate(tcache, newarena);
+	tcache_arena_dissociate(tsd, tcache, oldarena);
+	tcache_arena_associate(tsd, tcache, newarena);
 }
 
 void
-tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
+tcache_arena_dissociate(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
 {
 
 	if (config_stats) {
 		/* Unlink from list of extant tcaches. */
-		malloc_mutex_lock(&arena->lock);
+		malloc_mutex_lock(tsd, &arena->lock);
 		if (config_debug) {
 			bool in_ql = false;
 			tcache_t *iter;
@@ -290,8 +292,8 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
 			assert(in_ql);
 		}
 		ql_remove(&arena->tcache_ql, tcache, link);
-		tcache_stats_merge(tcache, arena);
-		malloc_mutex_unlock(&arena->lock);
+		tcache_stats_merge(tsd, tcache, arena);
+		malloc_mutex_unlock(tsd, &arena->lock);
 	}
 }
 
@@ -327,11 +329,11 @@ tcache_create(tsd_t *tsd, arena_t *arena)
 	size = sa2u(size, CACHELINE);
 
 	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
-	    arena_get(0, false));
+	    arena_get(tsd, 0, false));
 	if (tcache == NULL)
 		return (NULL);
 
-	tcache_arena_associate(tcache, arena);
+	tcache_arena_associate(tsd, tcache, arena);
 
 	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
 
@@ -358,7 +360,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 	unsigned i;
 
 	arena = arena_choose(tsd, NULL);
-	tcache_arena_dissociate(tcache, arena);
+	tcache_arena_dissociate(tsd, tcache, arena);
 
 	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
@@ -366,9 +368,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_bin_t *bin = &arena->bins[i];
-			malloc_mutex_lock(&bin->lock);
+			malloc_mutex_lock(tsd, &bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
-			malloc_mutex_unlock(&bin->lock);
+			malloc_mutex_unlock(tsd, &bin->lock);
 		}
 	}
 
@@ -377,17 +379,17 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
-			malloc_mutex_lock(&arena->lock);
+			malloc_mutex_lock(tsd, &arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
 			arena->stats.lstats[i - NBINS].nrequests +=
 			    tbin->tstats.nrequests;
-			malloc_mutex_unlock(&arena->lock);
+			malloc_mutex_unlock(tsd, &arena->lock);
 		}
 	}
 
 	if (config_prof && tcache->prof_accumbytes > 0 &&
-	    arena_prof_accum(arena, tcache->prof_accumbytes))
-		prof_idump();
+	    arena_prof_accum(tsd, arena, tcache->prof_accumbytes))
+		prof_idump(tsd);
 
 	idalloctm(tsd, tcache, false, true, true);
 }
@@ -413,21 +415,22 @@ tcache_enabled_cleanup(tsd_t *tsd)
 	/* Do nothing. */
 }
 
-/* Caller must own arena->lock. */
 void
-tcache_stats_merge(tcache_t *tcache, arena_t *arena)
+tcache_stats_merge(tsd_t *tsd, tcache_t *tcache, arena_t *arena)
 {
 	unsigned i;
 
 	cassert(config_stats);
 
+	malloc_mutex_assert_owner(tsd, &arena->lock);
+
 	/* Merge and reset tcache stats. */
 	for (i = 0; i < NBINS; i++) {
 		arena_bin_t *bin = &arena->bins[i];
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		malloc_mutex_lock(&bin->lock);
+		malloc_mutex_lock(tsd, &bin->lock);
 		bin->stats.nrequests += tbin->tstats.nrequests;
-		malloc_mutex_unlock(&bin->lock);
+		malloc_mutex_unlock(tsd, &bin->lock);
 		tbin->tstats.nrequests = 0;
 	}
 
@@ -447,7 +450,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 	tcaches_t *elm;
 
 	if (tcaches == NULL) {
-		tcaches = base_alloc(sizeof(tcache_t *) *
+		tcaches = base_alloc(tsd, sizeof(tcache_t *) *
 		    (MALLOCX_TCACHE_MAX+1));
 		if (tcaches == NULL)
 			return (true);
@@ -455,7 +458,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
 
 	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
 		return (true);
-	tcache = tcache_create(tsd, arena_get(0, false));
+	tcache = tcache_create(tsd, arena_get(tsd, 0, false));
 	if (tcache == NULL)
 		return (true);
 
@@ -501,7 +504,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
 }
 
 bool
-tcache_boot(void)
+tcache_boot(tsd_t *tsd)
 {
 	unsigned i;
 
@@ -519,7 +522,7 @@ tcache_boot(void)
 	nhbins = size2index(tcache_maxclass) + 1;
 
 	/* Initialize tcache_bin_info. */
-	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsd, nhbins *
 	    sizeof(tcache_bin_info_t));
 	if (tcache_bin_info == NULL)
 		return (true);
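Note the change to tcache_stats_merge(): its locking contract, previously expressed only as the comment "Caller must own arena->lock.", is now enforced by malloc_mutex_assert_owner(). That helper is declared in a header outside this diff view; a minimal sketch of what it presumably does, in terms of the witness API added below:

/* Hypothetical sketch: turns a documented locking contract into a
 * debug-build check against the calling thread's witness list. */
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsd_t *tsd, malloc_mutex_t *mutex)
{

	if (config_debug)
		witness_assert_owner(tsd, &mutex->witness);
}

The practical difference: a caller that forgets to hold arena->lock now aborts immediately in debug builds instead of silently corrupting stats.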
src/tsd.c (20 changed lines):

@@ -106,15 +106,17 @@ MALLOC_TSD
 	}
 }
 
-bool
+tsd_t *
 malloc_tsd_boot0(void)
 {
+	tsd_t *tsd;
 
 	ncleanups = 0;
 	if (tsd_boot0())
-		return (true);
-	*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = true;
-	return (false);
+		return (NULL);
+	tsd = tsd_fetch();
+	*tsd_arenas_tdata_bypassp_get(tsd) = true;
+	return (tsd);
}
 
 void
@@ -169,10 +171,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
 	tsd_init_block_t *iter;
 
 	/* Check whether this thread has already inserted into the list. */
-	malloc_mutex_lock(&head->lock);
+	malloc_mutex_lock(NULL, &head->lock);
 	ql_foreach(iter, &head->blocks, link) {
 		if (iter->thread == self) {
-			malloc_mutex_unlock(&head->lock);
+			malloc_mutex_unlock(NULL, &head->lock);
 			return (iter->data);
 		}
 	}
@@ -180,7 +182,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
 	ql_elm_new(block, link);
 	block->thread = self;
 	ql_tail_insert(&head->blocks, block, link);
-	malloc_mutex_unlock(&head->lock);
+	malloc_mutex_unlock(NULL, &head->lock);
 	return (NULL);
 }
 
@@ -188,8 +190,8 @@ void
 tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
 {
 
-	malloc_mutex_lock(&head->lock);
+	malloc_mutex_lock(NULL, &head->lock);
 	ql_remove(&head->blocks, block, link);
-	malloc_mutex_unlock(&head->lock);
+	malloc_mutex_unlock(NULL, &head->lock);
 }
 #endif
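Two details are worth calling out. First, malloc_tsd_boot0() changing from bool (failure flag) to tsd_t * (NULL on failure) lets the bootstrap path obtain a usable tsd and its failure status in one step, so boot-time lock acquisitions can be witness-checked from the start. Second, the malloc_mutex_lock(NULL, &head->lock) calls above are deliberate: tsd_init_check_recursion() runs before TSD itself is usable, and as src/witness.c below shows, a NULL tsd makes every witness operation return early. A hypothetical caller sketch (the real call site is in src/jemalloc.c, whose diff is suppressed above):

/* Hypothetical sketch of the bootstrap call site. */
static bool
example_init(void)
{
	tsd_t *tsd;

	tsd = malloc_tsd_boot0();
	if (tsd == NULL)
		return (true);	/* TSD bootstrap failed. */
	/* tsd can now be threaded through boot-time lock acquisitions. */
	return (false);
}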
src/witness.c (new file, 206 lines):

@@ -0,0 +1,206 @@
+#define	JEMALLOC_WITNESS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+void
+witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+    witness_comp_t *comp)
+{
+
+	witness->name = name;
+	witness->rank = rank;
+	witness->comp = comp;
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define	witness_lock_error JEMALLOC_N(witness_lock_error_impl)
+#endif
+static void
+witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
+{
+	witness_t *w;
+
+	malloc_printf("<jemalloc>: Lock rank order reversal:");
+	ql_foreach(w, witnesses, link) {
+		malloc_printf(" %s(%u)", w->name, w->rank);
+	}
+	malloc_printf(" %s(%u)\n", witness->name, witness->rank);
+	abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define	witness_lock_error JEMALLOC_N(witness_lock_error)
+witness_lock_error_t *witness_lock_error = JEMALLOC_N(witness_lock_error_impl);
+#endif
+
+void
+witness_lock(tsd_t *tsd, witness_t *witness)
+{
+	witness_list_t *witnesses;
+	witness_t *w;
+
+	cassert(config_debug);
+
+	if (tsd == NULL)
+		return;
+	if (witness->rank == WITNESS_RANK_OMIT)
+		return;
+
+	witness_assert_not_owner(tsd, witness);
+
+	witnesses = tsd_witnessesp_get(tsd);
+	w = ql_last(witnesses, link);
+	if (w != NULL && w->rank >= witness->rank && (w->comp == NULL ||
+	    w->comp != witness->comp || w->comp(w, witness) > 0))
+		witness_lock_error(witnesses, witness);
+
+	ql_elm_new(witness, link);
+	ql_tail_insert(witnesses, witness, link);
+}
+
+void
+witness_unlock(tsd_t *tsd, witness_t *witness)
+{
+	witness_list_t *witnesses;
+
+	cassert(config_debug);
+
+	if (tsd == NULL)
+		return;
+	if (witness->rank == WITNESS_RANK_OMIT)
+		return;
+
+	witness_assert_owner(tsd, witness);
+
+	witnesses = tsd_witnessesp_get(tsd);
+	ql_remove(witnesses, witness, link);
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define	witness_owner_error JEMALLOC_N(witness_owner_error_impl)
+#endif
+static void
+witness_owner_error(const witness_t *witness)
+{
+
+	malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
+	    witness->rank);
+	abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define	witness_owner_error JEMALLOC_N(witness_owner_error)
+witness_owner_error_t *witness_owner_error =
+    JEMALLOC_N(witness_owner_error_impl);
+#endif
+
+void
+witness_assert_owner(tsd_t *tsd, const witness_t *witness)
+{
+	witness_list_t *witnesses;
+	witness_t *w;
+
+	cassert(config_debug);
+
+	if (tsd == NULL)
+		return;
+	if (witness->rank == WITNESS_RANK_OMIT)
+		return;
+
+	witnesses = tsd_witnessesp_get(tsd);
+	ql_foreach(w, witnesses, link) {
+		if (w == witness)
+			return;
+	}
+	witness_owner_error(witness);
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define	witness_not_owner_error JEMALLOC_N(witness_not_owner_error_impl)
+#endif
+static void
+witness_not_owner_error(const witness_t *witness)
+{
+
+	malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
+	    witness->rank);
+	abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define	witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+witness_not_owner_error_t *witness_not_owner_error =
+    JEMALLOC_N(witness_not_owner_error_impl);
+#endif
+
+void
+witness_assert_not_owner(tsd_t *tsd, const witness_t *witness)
+{
+	witness_list_t *witnesses;
+	witness_t *w;
+
+	cassert(config_debug);
+
+	if (tsd == NULL)
+		return;
+	if (witness->rank == WITNESS_RANK_OMIT)
+		return;
+
+	witnesses = tsd_witnessesp_get(tsd);
+	ql_foreach(w, witnesses, link) {
+		if (w == witness)
+			witness_not_owner_error(witness);
+	}
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define	witness_lockless_error JEMALLOC_N(witness_lockless_error_impl)
+#endif
+static void
+witness_lockless_error(const witness_list_t *witnesses)
+{
+	witness_t *w;
+
+	malloc_printf("<jemalloc>: Should not own any locks:");
+	ql_foreach(w, witnesses, link) {
+		malloc_printf(" %s(%u)", w->name, w->rank);
+	}
+	malloc_printf("\n");
+	abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define	witness_lockless_error JEMALLOC_N(witness_lockless_error)
+witness_lockless_error_t *witness_lockless_error =
+    JEMALLOC_N(witness_lockless_error_impl);
+#endif
+
+void
+witness_assert_lockless(tsd_t *tsd)
+{
+	witness_list_t *witnesses;
+	witness_t *w;
+
+	cassert(config_debug);
+
+	if (tsd == NULL)
+		return;
+
+	witnesses = tsd_witnessesp_get(tsd);
+	w = ql_last(witnesses, link);
+	if (w != NULL) {
+		witness_lockless_error(witnesses);
+	}
+}
+
+void
+witnesses_cleanup(tsd_t *tsd)
+{
+
+	witness_assert_lockless(tsd);
+
+	/* Do nothing. */
+}
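The witness module gives each mutex a name and an integer rank and keeps a per-thread list of currently held witnesses in TSD. witness_lock() compares a new witness only against the most recently acquired one: its rank must be strictly greater, unless both witnesses share the same comparison function and that function orders them consistently. A condensed usage example, mirroring test/unit/witness.c below (the wrapper function is illustrative only):

/* Condensed from test/unit/witness.c: ranks must increase as locks
 * are acquired, so taking rank 2 after rank 1 is fine, while the
 * reverse order would invoke witness_lock_error() and abort. */
static void
example_ordering(void)
{
	witness_t a, b;
	tsd_t *tsd = tsd_fetch();

	witness_init(&a, "a", 1, NULL);
	witness_init(&b, "b", 2, NULL);

	witness_lock(tsd, &a);		/* Rank 1. */
	witness_lock(tsd, &b);		/* Rank 2 > 1: OK. */
	witness_unlock(tsd, &b);
	witness_unlock(tsd, &a);

	witness_assert_lockless(tsd);	/* Nothing held now. */
}

Checking only the most recent witness keeps the per-acquisition cost constant; transitivity of the rank order guarantees the whole held set is still consistent.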
src/zone.c:

@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
 	 * not work in practice, we must check all pointers to assure that they
 	 * reside within a mapped chunk before determining size.
 	 */
-	return (ivsalloc(ptr, config_prof));
+	return (ivsalloc(tsd_fetch(), ptr, config_prof));
 }
 
 static void *
@@ -87,7 +87,7 @@ static void
 zone_free(malloc_zone_t *zone, void *ptr)
 {
 
-	if (ivsalloc(ptr, config_prof) != 0) {
+	if (ivsalloc(tsd_fetch(), ptr, config_prof) != 0) {
 		je_free(ptr);
 		return;
 	}
@@ -99,7 +99,7 @@ static void *
 zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
 {
 
-	if (ivsalloc(ptr, config_prof) != 0)
+	if (ivsalloc(tsd_fetch(), ptr, config_prof) != 0)
 		return (je_realloc(ptr, size));
 
 	return (realloc(ptr, size));
@@ -123,7 +123,7 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
 {
 	size_t alloc_size;
 
-	alloc_size = ivsalloc(ptr, config_prof);
+	alloc_size = ivsalloc(tsd_fetch(), ptr, config_prof);
 	if (alloc_size != 0) {
 		assert(alloc_size == size);
 		je_free(ptr);
test/unit/junk.c:

@@ -53,10 +53,10 @@ arena_dalloc_junk_large_intercept(void *ptr, size_t usize)
 }
 
 static void
-huge_dalloc_junk_intercept(void *ptr, size_t usize)
+huge_dalloc_junk_intercept(tsd_t *tsd, void *ptr, size_t usize)
 {
 
-	huge_dalloc_junk_orig(ptr, usize);
+	huge_dalloc_junk_orig(tsd, ptr, usize);
 	/*
 	 * The conditions under which junk filling actually occurs are nuanced
 	 * enough that it doesn't make sense to duplicate the decision logic in
test/unit/prof_reset.c:

@@ -94,7 +94,8 @@ TEST_END
 bool prof_dump_header_intercepted = false;
 prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
 static bool
-prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
+prof_dump_header_intercept(tsd_t *tsd, bool propagate_err,
+    const prof_cnt_t *cnt_all)
 {
 
 	prof_dump_header_intercepted = true;
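The test updates above rely on jemalloc's JEMALLOC_JET convention, the same one visible in src/witness.c: in testing builds a routine is compiled under an _impl name and exported through a writable function pointer of a matching _t typedef, so tests can swap in a spy and restore the original afterwards. A condensed sketch of the pattern as the witness tests below apply it (example_intercept() is illustrative; the hooks and intercept function are defined in the test file):

/* Condensed from test/unit/witness.c: intercept the abort()ing error
 * hook with a recording spy, provoke the error, then restore it. */
static void
example_intercept(tsd_t *tsd)
{
	witness_t a, b;

	witness_lock_error_orig = witness_lock_error;
	witness_lock_error = witness_lock_error_intercept;
	saw_lock_error = false;

	witness_init(&a, "a", 1, NULL);
	witness_init(&b, "b", 2, NULL);
	witness_lock(tsd, &b);
	witness_lock(tsd, &a);	/* Rank reversal: the spy records it. */
	assert_true(saw_lock_error, "Expected witness lock error");

	witness_unlock(tsd, &a);
	witness_unlock(tsd, &b);
	witness_lock_error = witness_lock_error_orig;	/* Restore. */
}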
test/unit/witness.c (new file, 278 lines):

@@ -0,0 +1,278 @@
+#include "test/jemalloc_test.h"
+
+static witness_lock_error_t *witness_lock_error_orig;
+static witness_owner_error_t *witness_owner_error_orig;
+static witness_not_owner_error_t *witness_not_owner_error_orig;
+static witness_lockless_error_t *witness_lockless_error_orig;
+
+static bool saw_lock_error;
+static bool saw_owner_error;
+static bool saw_not_owner_error;
+static bool saw_lockless_error;
+
+static void
+witness_lock_error_intercept(const witness_list_t *witnesses,
+    const witness_t *witness)
+{
+
+	saw_lock_error = true;
+}
+
+static void
+witness_owner_error_intercept(const witness_t *witness)
+{
+
+	saw_owner_error = true;
+}
+
+static void
+witness_not_owner_error_intercept(const witness_t *witness)
+{
+
+	saw_not_owner_error = true;
+}
+
+static void
+witness_lockless_error_intercept(const witness_list_t *witnesses)
+{
+
+	saw_lockless_error = true;
+}
+
+static int
+witness_comp(const witness_t *a, const witness_t *b)
+{
+
+	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+	return (strcmp(a->name, b->name));
+}
+
+static int
+witness_comp_reverse(const witness_t *a, const witness_t *b)
+{
+
+	assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank");
+
+	return (-strcmp(a->name, b->name));
+}
+
+TEST_BEGIN(test_witness)
+{
+	witness_t a, b;
+	tsd_t *tsd;
+
+	test_skip_if(!config_debug);
+
+	tsd = tsd_fetch();
+
+	witness_assert_lockless(tsd);
+
+	witness_init(&a, "a", 1, NULL);
+	witness_assert_not_owner(tsd, &a);
+	witness_lock(tsd, &a);
+	witness_assert_owner(tsd, &a);
+
+	witness_init(&b, "b", 2, NULL);
+	witness_assert_not_owner(tsd, &b);
+	witness_lock(tsd, &b);
+	witness_assert_owner(tsd, &b);
+
+	witness_unlock(tsd, &a);
+	witness_unlock(tsd, &b);
+
+	witness_assert_lockless(tsd);
+}
+TEST_END
+
+TEST_BEGIN(test_witness_comp)
+{
+	witness_t a, b, c, d;
+	tsd_t *tsd;
+
+	test_skip_if(!config_debug);
+
+	tsd = tsd_fetch();
+
+	witness_assert_lockless(tsd);
+
+	witness_init(&a, "a", 1, witness_comp);
+	witness_assert_not_owner(tsd, &a);
+	witness_lock(tsd, &a);
+	witness_assert_owner(tsd, &a);
+
+	witness_init(&b, "b", 1, witness_comp);
+	witness_assert_not_owner(tsd, &b);
+	witness_lock(tsd, &b);
+	witness_assert_owner(tsd, &b);
+	witness_unlock(tsd, &b);
+
+	witness_lock_error_orig = witness_lock_error;
+	witness_lock_error = witness_lock_error_intercept;
+	saw_lock_error = false;
+
+	witness_init(&c, "c", 1, witness_comp_reverse);
+	witness_assert_not_owner(tsd, &c);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	witness_lock(tsd, &c);
+	assert_true(saw_lock_error, "Expected witness lock error");
+	witness_unlock(tsd, &c);
+
+	saw_lock_error = false;
+
+	witness_init(&d, "d", 1, NULL);
+	witness_assert_not_owner(tsd, &d);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	witness_lock(tsd, &d);
+	assert_true(saw_lock_error, "Expected witness lock error");
+	witness_unlock(tsd, &d);
+
+	witness_unlock(tsd, &a);
+
+	witness_assert_lockless(tsd);
+
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_reversal)
+{
+	witness_t a, b;
+	tsd_t *tsd;
+
+	test_skip_if(!config_debug);
+
+	witness_lock_error_orig = witness_lock_error;
+	witness_lock_error = witness_lock_error_intercept;
+	saw_lock_error = false;
+
+	tsd = tsd_fetch();
+
+	witness_assert_lockless(tsd);
+
+	witness_init(&a, "a", 1, NULL);
+	witness_init(&b, "b", 2, NULL);
+
+	witness_lock(tsd, &b);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	witness_lock(tsd, &a);
+	assert_true(saw_lock_error, "Expected witness lock error");
+
+	witness_unlock(tsd, &a);
+	witness_unlock(tsd, &b);
+
+	witness_assert_lockless(tsd);
+
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_recursive)
+{
+	witness_t a;
+	tsd_t *tsd;
+
+	test_skip_if(!config_debug);
+
+	witness_not_owner_error_orig = witness_not_owner_error;
+	witness_not_owner_error = witness_not_owner_error_intercept;
+	saw_not_owner_error = false;
+
+	witness_lock_error_orig = witness_lock_error;
+	witness_lock_error = witness_lock_error_intercept;
+	saw_lock_error = false;
+
+	tsd = tsd_fetch();
+
+	witness_assert_lockless(tsd);
+
+	witness_init(&a, "a", 1, NULL);
+
+	witness_lock(tsd, &a);
+	assert_false(saw_lock_error, "Unexpected witness lock error");
+	assert_false(saw_not_owner_error, "Unexpected witness not owner error");
+	witness_lock(tsd, &a);
+	assert_true(saw_lock_error, "Expected witness lock error");
+	assert_true(saw_not_owner_error, "Expected witness not owner error");
+
+	witness_unlock(tsd, &a);
+
+	witness_assert_lockless(tsd);
+
+	witness_not_owner_error = witness_not_owner_error_orig;
+	witness_lock_error = witness_lock_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_unlock_not_owned)
+{
+	witness_t a;
+	tsd_t *tsd;
+
+	test_skip_if(!config_debug);
+
+	witness_owner_error_orig = witness_owner_error;
+	witness_owner_error = witness_owner_error_intercept;
+	saw_owner_error = false;
+
+	tsd = tsd_fetch();
+
+	witness_assert_lockless(tsd);
+
+	witness_init(&a, "a", 1, NULL);
+
+	assert_false(saw_owner_error, "Unexpected owner error");
+	witness_unlock(tsd, &a);
+	assert_true(saw_owner_error, "Expected owner error");
+
+	witness_assert_lockless(tsd);
+
+	witness_owner_error = witness_owner_error_orig;
+}
+TEST_END
+
+TEST_BEGIN(test_witness_lockful)
+{
+	witness_t a;
+	tsd_t *tsd;
+
+	test_skip_if(!config_debug);
+
+	witness_lockless_error_orig = witness_lockless_error;
+	witness_lockless_error = witness_lockless_error_intercept;
+	saw_lockless_error = false;
+
+	tsd = tsd_fetch();
+
+	witness_assert_lockless(tsd);
+
+	witness_init(&a, "a", 1, NULL);
+
+	assert_false(saw_lockless_error, "Unexpected lockless error");
+	witness_assert_lockless(tsd);
+
+	witness_lock(tsd, &a);
+	witness_assert_lockless(tsd);
+	assert_true(saw_lockless_error, "Expected lockless error");
+
+	witness_unlock(tsd, &a);
+
+	witness_assert_lockless(tsd);
+
+	witness_lockless_error = witness_lockless_error_orig;
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(
+	    test_witness,
+	    test_witness_comp,
+	    test_witness_reversal,
+	    test_witness_recursive,
+	    test_witness_unlock_not_owned,
+	    test_witness_lockful));
+}