Reduce cpp conditional logic complexity.

Convert configuration-related cpp conditional logic to use static
constant variables, e.g.:

    #ifdef JEMALLOC_DEBUG
        [...]
    #endif

becomes:

    if (config_debug) {
        [...]
    }

The advantage is clearer, more concise code.  The main disadvantage is
that data structures no longer have conditionally defined fields, so
they pay the cost of all fields regardless of whether they are used.
In practice, this is only a minor concern; config_stats will go away in
an upcoming change, and config_prof is the only other major feature
that depends on more than a few special-purpose fields.
commit 7372b15a31
parent b3bd885090
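The payoff of this idiom is that each config_* flag is a compile-time
constant: an optimizing compiler folds `if (config_*)` and deletes the
dead branch, so the runtime cost matches the old cpp conditional, while
the guarded code is still parsed and type-checked in every build.  A
minimal sketch of the pattern (illustrative only; the real definitions
appear in the jemalloc_internal.h.in hunk below):

    /* Exactly one definition is selected at configure time. */
    static const bool config_debug =
    #ifdef JEMALLOC_DEBUG
        true
    #else
        false
    #endif
        ;

    void
    example(void)
    {
        if (config_debug) {
            /*
             * With JEMALLOC_DEBUG undefined, config_debug is the
             * constant false and the optimizer drops this block, yet
             * the block cannot silently bit-rot the way an #ifdef'ed
             * region can, because it always compiles.
             */
        }
    }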
configure.ac

@@ -174,6 +174,9 @@ AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
 LD_PRELOAD_VAR="LD_PRELOAD"
 so="so"
 
+dnl Heap profiling uses the log(3) function.
+LIBS="$LIBS -lm"
+
 dnl Platform-specific settings.  abi and RPATH can probably be determined
 dnl programmatically, but doing so is error-prone, which makes it generally
 dnl not worth the trouble.
@@ -553,7 +556,6 @@ fi
 AC_MSG_CHECKING([configured backtracing method])
 AC_MSG_RESULT([$backtrace_method])
 if test "x$enable_prof" = "x1" ; then
-  LIBS="$LIBS -lm"
   AC_DEFINE([JEMALLOC_PROF], [ ])
 fi
 AC_SUBST([enable_prof])
include/jemalloc/internal/arena.h

@@ -16,11 +16,9 @@
 #define SUBPAGE_CEILING(s) \
     (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
 
-#ifdef JEMALLOC_TINY
-/* Smallest size class to support. */
-# define LG_TINY_MIN LG_SIZEOF_PTR
-# define TINY_MIN (1U << LG_TINY_MIN)
-#endif
+/* Smallest size class to support. */
+#define LG_TINY_MIN LG_SIZEOF_PTR
+#define TINY_MIN (1U << LG_TINY_MIN)
 
 /*
  * Maximum size class that is a multiple of the quantum, but not (necessarily)
@@ -85,6 +83,15 @@ typedef struct arena_s arena_t;
 
 /* Each element of the chunk map corresponds to one page within the chunk. */
 struct arena_chunk_map_s {
+#ifndef JEMALLOC_PROF
+    /*
+     * Overlay prof_ctx in order to allow it to be referenced by dead code.
+     * Such antics aren't warranted for per arena data structures, but
+     * chunk map overhead accounts for a percentage of memory, rather than
+     * being just a fixed cost.
+     */
+    union {
+#endif
     union {
         /*
          * Linkage for run trees.  There are two disjoint uses:
@@ -103,9 +110,10 @@ struct arena_chunk_map_s {
         ql_elm(arena_chunk_map_t) ql_link;
     } u;
 
-#ifdef JEMALLOC_PROF
     /* Profile counters, used for large object runs. */
     prof_ctx_t *prof_ctx;
+#ifndef JEMALLOC_PROF
+    }; /* union { ... }; */
 #endif
 
     /*
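The two chunk-map hunks above implement the one structural cost the
commit message calls out, and they soften it with a space-saving trick:
when profiling is compiled out, prof_ctx is wrapped in an anonymous
union with the run linkage, so the now-dead code that still references
it does not enlarge the chunk map.  A reduced sketch of the overlay
with hypothetical names (FEATURE_FOO, foo_ctx_t are stand-ins, not from
the tree), assuming anonymous-union support (C11 or the common GNU
extension):

    typedef struct foo_ctx_s foo_ctx_t;
    typedef struct map_elem_s map_elem_t;

    struct map_elem_s {
    #ifndef FEATURE_FOO
        union {  /* anonymous: foo_ctx will overlay u */
    #endif
        union {
            map_elem_t *link;  /* normal linkage */
        } u;

        /* Meaningful only when the foo feature is compiled in. */
        foo_ctx_t *foo_ctx;
    #ifndef FEATURE_FOO
        }; /* union { ... }; */
    #endif
    };

With FEATURE_FOO undefined, foo_ctx shares storage with u, so an
expression like elem->foo_ctx still compiles (and is eliminated behind
if (config_foo)) without adding a word to every map element.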
@@ -162,10 +170,8 @@ struct arena_chunk_map_s {
      * ssssssss ssssssss ssss---- ----D-LA
      */
     size_t bits;
-#ifdef JEMALLOC_PROF
 #define CHUNK_MAP_CLASS_SHIFT 4
 #define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U)
-#endif
 #define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
 #define CHUNK_MAP_DIRTY ((size_t)0x8U)
 #define CHUNK_MAP_UNZEROED ((size_t)0x4U)
@@ -205,10 +211,8 @@ struct arena_chunk_s {
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
 
 struct arena_run_s {
-#ifdef JEMALLOC_DEBUG
     uint32_t magic;
 # define ARENA_RUN_MAGIC 0x384adf93
-#endif
 
     /* Bin this run is associated with. */
     arena_bin_t *bin;
@@ -247,13 +251,11 @@ struct arena_bin_info_s {
      */
     bitmap_info_t bitmap_info;
 
-#ifdef JEMALLOC_PROF
     /*
      * Offset of first (prof_ctx_t *) in a run header for this bin's size
-     * class, or 0 if (opt_prof == false).
+     * class, or 0 if (config_prof == false || opt_prof == false).
      */
     uint32_t ctx0_offset;
-#endif
 
     /* Offset of first region in a run for this bin's size class. */
     uint32_t reg0_offset;
@@ -283,17 +285,13 @@ struct arena_bin_s {
      */
     arena_run_tree_t runs;
 
-#ifdef JEMALLOC_STATS
     /* Bin statistics. */
     malloc_bin_stats_t stats;
-#endif
 };
 
 struct arena_s {
-#ifdef JEMALLOC_DEBUG
     uint32_t magic;
 # define ARENA_MAGIC 0x947d3d24
-#endif
 
     /* This arena's index within the arenas array. */
     unsigned ind;
@@ -314,20 +312,14 @@ struct arena_s {
      */
     malloc_mutex_t lock;
 
-#ifdef JEMALLOC_STATS
     arena_stats_t stats;
-# ifdef JEMALLOC_TCACHE
     /*
      * List of tcaches for extant threads associated with this arena.
      * Stats from these are merged incrementally, and at exit.
      */
     ql_head(tcache_t) tcache_ql;
-# endif
-#endif
 
-#ifdef JEMALLOC_PROF
     uint64_t prof_accumbytes;
-#endif
 
     /* List of dirty-page-containing chunks this arena manages. */
     ql_head(arena_chunk_t) chunks_dirty;
@@ -455,35 +447,23 @@ extern size_t sspace_max;
 #define nlclasses (chunk_npages - map_bias)
 
 void arena_purge_all(arena_t *arena);
-#ifdef JEMALLOC_PROF
 void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-#endif
-#ifdef JEMALLOC_TCACHE
 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    size_t binind
-# ifdef JEMALLOC_PROF
-    , uint64_t prof_accumbytes
-# endif
-    );
-#endif
+    size_t binind, uint64_t prof_accumbytes);
 void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
 void *arena_malloc(size_t size, bool zero);
 void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
     size_t alignment, bool zero);
 size_t arena_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 void arena_prof_promoted(const void *ptr, size_t size);
 size_t arena_salloc_demote(const void *ptr);
-#endif
 void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_chunk_map_t *mapelm);
 void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#ifdef JEMALLOC_STATS
 void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats);
-#endif
 void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
@@ -499,10 +479,8 @@ bool arena_boot(void);
 size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
-# ifdef JEMALLOC_PROF
 prof_ctx_t *arena_prof_ctx_get(const void *ptr);
 void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-# endif
 void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
 #endif
 
@@ -521,7 +499,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
     unsigned shift, diff, regind;
     size_t size;
 
-    dassert(run->magic == ARENA_RUN_MAGIC);
+    assert(run->magic == ARENA_RUN_MAGIC);
     /*
      * Freeing a pointer lower than region zero can cause assertion
      * failure.
@@ -586,7 +564,6 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
     return (regind);
 }
 
-#ifdef JEMALLOC_PROF
 JEMALLOC_INLINE prof_ctx_t *
 arena_prof_ctx_get(const void *ptr)
 {
@@ -594,6 +571,7 @@ arena_prof_ctx_get(const void *ptr)
     arena_chunk_t *chunk;
     size_t pageind, mapbits;
 
+    cassert(config_prof);
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
@@ -612,7 +590,7 @@ arena_prof_ctx_get(const void *ptr)
             arena_bin_info_t *bin_info = &arena_bin_info[binind];
             unsigned regind;
 
-            dassert(run->magic == ARENA_RUN_MAGIC);
+            assert(run->magic == ARENA_RUN_MAGIC);
             regind = arena_run_regind(run, bin_info, ptr);
             ret = *(prof_ctx_t **)((uintptr_t)run +
                 bin_info->ctx0_offset + (regind *
@@ -630,6 +608,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
     arena_chunk_t *chunk;
     size_t pageind, mapbits;
 
+    cassert(config_prof);
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
@@ -647,7 +626,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
             arena_bin_info_t *bin_info;
             unsigned regind;
 
-            dassert(run->magic == ARENA_RUN_MAGIC);
+            assert(run->magic == ARENA_RUN_MAGIC);
             binind = arena_bin_index(chunk->arena, bin);
             bin_info = &arena_bin_info[binind];
             regind = arena_run_regind(run, bin_info, ptr);
@@ -659,7 +638,6 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
     } else
         chunk->map[pageind-map_bias].prof_ctx = ctx;
 }
-#endif
 
 JEMALLOC_INLINE void
 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@@ -668,7 +646,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     arena_chunk_map_t *mapelm;
 
     assert(arena != NULL);
-    dassert(arena->magic == ARENA_MAGIC);
+    assert(arena->magic == ARENA_MAGIC);
     assert(chunk->arena == arena);
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
@@ -678,63 +656,57 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
         /* Small allocation. */
-#ifdef JEMALLOC_TCACHE
         tcache_t *tcache;
 
-        if ((tcache = tcache_get()) != NULL)
+        if (config_tcache && (tcache = tcache_get()) != NULL)
             tcache_dalloc_small(tcache, ptr);
         else {
-#endif
             arena_run_t *run;
             arena_bin_t *bin;
 
             run = (arena_run_t *)((uintptr_t)chunk +
                 (uintptr_t)((pageind - (mapelm->bits >>
                 PAGE_SHIFT)) << PAGE_SHIFT));
-            dassert(run->magic == ARENA_RUN_MAGIC);
+            assert(run->magic == ARENA_RUN_MAGIC);
             bin = run->bin;
-#ifdef JEMALLOC_DEBUG
-            {
+            if (config_debug) {
                 size_t binind = arena_bin_index(arena, bin);
-                arena_bin_info_t *bin_info =
+                UNUSED arena_bin_info_t *bin_info =
                     &arena_bin_info[binind];
                 assert(((uintptr_t)ptr - ((uintptr_t)run +
                     (uintptr_t)bin_info->reg0_offset)) %
                     bin_info->reg_size == 0);
             }
-#endif
             malloc_mutex_lock(&bin->lock);
             arena_dalloc_bin(arena, chunk, ptr, mapelm);
             malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_TCACHE
         }
-#endif
     } else {
-#ifdef JEMALLOC_TCACHE
+        if (config_tcache) {
             size_t size = mapelm->bits & ~PAGE_MASK;
 
             assert(((uintptr_t)ptr & PAGE_MASK) == 0);
             if (size <= tcache_maxclass) {
                 tcache_t *tcache;
 
                 if ((tcache = tcache_get()) != NULL)
                     tcache_dalloc_large(tcache, ptr, size);
                 else {
+                    malloc_mutex_lock(&arena->lock);
+                    arena_dalloc_large(arena, chunk, ptr);
+                    malloc_mutex_unlock(&arena->lock);
+                }
+            } else {
                 malloc_mutex_lock(&arena->lock);
                 arena_dalloc_large(arena, chunk, ptr);
                 malloc_mutex_unlock(&arena->lock);
             }
         } else {
+            assert(((uintptr_t)ptr & PAGE_MASK) == 0);
             malloc_mutex_lock(&arena->lock);
             arena_dalloc_large(arena, chunk, ptr);
             malloc_mutex_unlock(&arena->lock);
         }
-#else
-        assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-        malloc_mutex_lock(&arena->lock);
-        arena_dalloc_large(arena, chunk, ptr);
-        malloc_mutex_unlock(&arena->lock);
-#endif
     }
 }
 #endif
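Note the shape of the rewritten test above,
`if (config_tcache && (tcache = tcache_get()) != NULL)`: because
config_tcache is a compile-time constant, && short-circuits and an
optimizing non-tcache build folds the whole condition to false,
dropping the call.  A sketch of the same shape (hypothetical names;
optimization is assumed for the dead call to vanish):

    static const bool config_tcache = false; /* as if built without tcache */

    void *tcache_get(void); /* prototype suffices once the call folds away */

    void
    dalloc_path(void *ptr)
    {
        void *tcache;

        /* Left operand is constant false; the call is never emitted. */
        if (config_tcache && (tcache = tcache_get()) != NULL) {
            /* fast-path deallocation through the thread cache */
        }
    }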
include/jemalloc/internal/chunk.h

@@ -28,20 +28,14 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 extern size_t opt_lg_chunk;
-#ifdef JEMALLOC_SWAP
 extern bool opt_overcommit;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t chunks_mtx;
 /* Chunk statistics. */
 extern chunk_stats_t stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 extern rtree_t *chunks_rtree;
-#endif
 
 extern size_t chunksize;
 extern size_t chunksize_mask; /* (chunksize - 1). */
include/jemalloc/internal/chunk_dss.h

@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -27,4 +26,3 @@ bool chunk_dss_boot(void);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */

include/jemalloc/internal/chunk_swap.h

@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_SWAP
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -15,9 +14,7 @@ extern bool swap_enabled;
 extern bool swap_prezeroed;
 extern size_t swap_nfds;
 extern int *swap_fds;
-#ifdef JEMALLOC_STATS
 extern size_t swap_avail;
-#endif
 
 void *chunk_alloc_swap(size_t size, bool *zero);
 bool chunk_in_swap(void *chunk);
@@ -31,4 +28,3 @@ bool chunk_swap_boot(void);
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_SWAP */
include/jemalloc/internal/ckh.h

@@ -30,10 +30,8 @@ struct ckhc_s {
 };
 
 struct ckh_s {
-#ifdef JEMALLOC_DEBUG
 #define CKH_MAGIC 0x3af2489d
     uint32_t magic;
-#endif
 
 #ifdef CKH_COUNT
     /* Counters used to get an idea of performance. */
include/jemalloc/internal/ctl.h

@@ -32,7 +32,6 @@ struct ctl_arena_stats_s {
     unsigned nthreads;
     size_t pactive;
     size_t pdirty;
-#ifdef JEMALLOC_STATS
     arena_stats_t astats;
 
     /* Aggregate stats for small size classes, based on bin stats. */
@@ -43,11 +42,9 @@ struct ctl_arena_stats_s {
 
     malloc_bin_stats_t *bstats;   /* nbins elements. */
     malloc_large_stats_t *lstats; /* nlclasses elements. */
-#endif
 };
 
 struct ctl_stats_s {
-#ifdef JEMALLOC_STATS
     size_t allocated;
     size_t active;
     size_t mapped;
@@ -61,11 +58,8 @@ struct ctl_stats_s {
         uint64_t nmalloc; /* huge_nmalloc */
         uint64_t ndalloc; /* huge_ndalloc */
     } huge;
-#endif
     ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
-#ifdef JEMALLOC_SWAP
     size_t swap_avail;
-#endif
 };
 
 #endif /* JEMALLOC_H_STRUCTS */
include/jemalloc/internal/extent.h

@@ -9,18 +9,14 @@ typedef struct extent_node_s extent_node_t;
 
 /* Tree of extents. */
 struct extent_node_s {
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
     /* Linkage for the size/address-ordered tree. */
     rb_node(extent_node_t) link_szad;
-#endif
 
     /* Linkage for the address-ordered tree. */
     rb_node(extent_node_t) link_ad;
 
-#ifdef JEMALLOC_PROF
     /* Profile counters, used for huge objects. */
     prof_ctx_t *prof_ctx;
-#endif
 
     /* Pointer to the extent that this tree node is responsible for. */
     void *addr;
@@ -34,9 +30,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
-#endif
 
 rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)
 
include/jemalloc/internal/huge.h

@@ -9,12 +9,10 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-#ifdef JEMALLOC_STATS
 /* Huge allocation statistics. */
 extern uint64_t huge_nmalloc;
 extern uint64_t huge_ndalloc;
 extern size_t huge_allocated;
-#endif
 
 /* Protects chunk-related data structures. */
 extern malloc_mutex_t huge_mtx;
@@ -27,10 +25,8 @@ void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero);
 void huge_dalloc(void *ptr, bool unmap);
 size_t huge_salloc(const void *ptr);
-#ifdef JEMALLOC_PROF
 prof_ctx_t *huge_prof_ctx_get(const void *ptr);
 void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
-#endif
 bool huge_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
include/jemalloc/internal/jemalloc_internal.h.in

@@ -35,6 +35,125 @@
 
 #include "jemalloc/internal/private_namespace.h"
 
+#ifdef JEMALLOC_CC_SILENCE
+#define UNUSED JEMALLOC_ATTR(unused)
+#else
+#define UNUSED
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dss =
+#ifdef JEMALLOC_DSS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_dynamic_page_shift =
+#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_lazy_lock =
+#ifdef JEMALLOC_LAZY_LOCK
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_swap =
+#ifdef JEMALLOC_SWAP
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_sysv =
+#ifdef JEMALLOC_SYSV
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tcache =
+#ifdef JEMALLOC_TCACHE
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tiny =
+#ifdef JEMALLOC_TINY
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+    true
+#else
+    false
+#endif
+    ;
+static const bool config_ivsalloc =
+#ifdef JEMALLOC_IVSALLOC
+    true
+#else
+    false
+#endif
+    ;
+
 #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
 #include <libkern/OSAtomic.h>
 #endif
@@ -82,11 +201,11 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 # endif
 #endif
 
-#ifdef JEMALLOC_DEBUG
-# define dassert(e) assert(e)
-#else
-# define dassert(e)
-#endif
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define cassert(c) do {                                                 \
+    if ((c) == false)                                                   \
+        assert(false);                                                  \
+} while (0)
 
 /*
  * jemalloc can conceptually be broken into components (arena, tcache, etc.),
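The cassert above is a configuration assertion rather than a debug
assertion: feature-specific helpers now compile in every configuration,
and cassert(config_foo) turns a call that should be impossible into an
assertion failure (via jemalloc's assert, which is itself active only
in debug builds).  A small usage sketch:

    #include <assert.h>
    #include <stdbool.h>

    static const bool config_prof = false; /* e.g., built without profiling */

    #define cassert(c) do {                                             \
            if ((c) == false)                                           \
                    assert(false);                                      \
    } while (0)

    /* Always compiled; must be unreachable when profiling is disabled. */
    static void
    prof_only_helper(void)
    {
            cassert(config_prof);
            /* ... profiling-specific work ... */
    }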
@@ -265,30 +384,20 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 #endif
 #include "jemalloc/internal/prof.h"
 
-#ifdef JEMALLOC_STATS
 typedef struct {
     uint64_t allocated;
     uint64_t deallocated;
 } thread_allocated_t;
-#endif
 
 #undef JEMALLOC_H_STRUCTS
 /******************************************************************************/
 #define JEMALLOC_H_EXTERNS
 
 extern bool opt_abort;
-#ifdef JEMALLOC_FILL
 extern bool opt_junk;
-#endif
-#ifdef JEMALLOC_SYSV
 extern bool opt_sysv;
-#endif
-#ifdef JEMALLOC_XMALLOC
 extern bool opt_xmalloc;
-#endif
-#ifdef JEMALLOC_FILL
 extern bool opt_zero;
-#endif
 extern size_t opt_narenas;
 
 #ifdef DYNAMIC_PAGE_SHIFT
@@ -327,8 +436,7 @@ extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
 extern arena_t **arenas;
 extern unsigned narenas;
 
-#ifdef JEMALLOC_STATS
-# ifndef NO_TLS
+#ifndef NO_TLS
 extern __thread thread_allocated_t thread_allocated_tls;
 # define ALLOCATED_GET() (thread_allocated_tls.allocated)
 # define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
@@ -338,10 +446,7 @@ extern __thread thread_allocated_t thread_allocated_tls;
     thread_allocated_tls.allocated += a;                                \
     thread_allocated_tls.deallocated += d;                              \
 } while (0)
-# else
-extern pthread_key_t thread_allocated_tsd;
-thread_allocated_t *thread_allocated_get_hard(void);
-
+#else
 # define ALLOCATED_GET() (thread_allocated_get()->allocated)
 # define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
 # define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
@@ -351,8 +456,9 @@ thread_allocated_t *thread_allocated_get_hard(void);
     thread_allocated->allocated += (a);                                 \
     thread_allocated->deallocated += (d);                               \
 } while (0)
-# endif
 #endif
+extern pthread_key_t thread_allocated_tsd;
+thread_allocated_t *thread_allocated_get_hard(void);
 
 arena_t *arenas_extend(unsigned ind);
 arena_t *choose_arena_hard(void);
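One structural point from these hunks: the pthreads pieces
(thread_allocated_tsd, thread_allocated_get_hard()) are now declared
unconditionally, and only the fast accessors depend on NO_TLS.  The
general TLS-with-pthread_key fallback looks like this (sketch with
hypothetical names, not the file's exact code):

    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        uint64_t allocated;
        uint64_t deallocated;
    } counters_t;

    /* Always declared; the TSD key typically also serves as a
     * thread-exit destructor hook. */
    extern pthread_key_t counters_tsd;
    counters_t *counters_get_hard(void);

    #ifndef NO_TLS
    /* Fast path: direct TLS load, no function call. */
    extern __thread counters_t counters_tls;
    #define COUNTERS_GET() (&counters_tls)
    #else
    /* Fallback: fetch via pthread_getspecific(), allocating on first use. */
    #define COUNTERS_GET() (counters_get_hard())
    #endif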
@@ -403,9 +509,7 @@ size_t s2u(size_t size);
 size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
 void malloc_write(const char *s);
 arena_t *choose_arena(void);
-# if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 thread_allocated_t *thread_allocated_get(void);
-# endif
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -565,7 +669,6 @@ choose_arena(void)
     return (ret);
 }
 
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
 JEMALLOC_INLINE thread_allocated_t *
 thread_allocated_get(void)
 {
@@ -577,7 +680,6 @@ thread_allocated_get(void)
     return (thread_allocated);
 }
 #endif
-#endif
 
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/rtree.h"
@@ -593,9 +695,7 @@ void *imalloc(size_t size);
 void *icalloc(size_t size);
 void *ipalloc(size_t usize, size_t alignment, bool zero);
 size_t isalloc(const void *ptr);
-# ifdef JEMALLOC_IVSALLOC
 size_t ivsalloc(const void *ptr);
-# endif
 void idalloc(void *ptr);
 void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
     bool zero, bool no_move);
@@ -674,20 +774,18 @@ isalloc(const void *ptr)
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
+        assert(chunk->arena->magic == ARENA_MAGIC);
 
-#ifdef JEMALLOC_PROF
-        ret = arena_salloc_demote(ptr);
-#else
-        ret = arena_salloc(ptr);
-#endif
+        if (config_prof)
+            ret = arena_salloc_demote(ptr);
+        else
+            ret = arena_salloc(ptr);
     } else
         ret = huge_salloc(ptr);
 
     return (ret);
 }
 
-#ifdef JEMALLOC_IVSALLOC
 JEMALLOC_INLINE size_t
 ivsalloc(const void *ptr)
 {
@@ -698,7 +796,6 @@ ivsalloc(const void *ptr)
 
     return (isalloc(ptr));
 }
-#endif
 
 JEMALLOC_INLINE void
 idalloc(void *ptr)
include/jemalloc/internal/mutex.h

@@ -3,14 +3,14 @@
 
 #ifdef JEMALLOC_OSSPIN
 typedef OSSpinLock malloc_mutex_t;
+#define MALLOC_MUTEX_INITIALIZER 0
 #else
 typedef pthread_mutex_t malloc_mutex_t;
-#endif
-
-#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#else
-# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+# ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#  define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+# else
+#  define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+# endif
 #endif
 
 #endif /* JEMALLOC_H_TYPES */
include/jemalloc/internal/prof.h

@@ -1,4 +1,3 @@
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES
 
@@ -297,6 +296,8 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
     uint64_t r;
     double u;
 
+    cassert(config_prof);
+
     /*
      * Compute sample threshold as a geometrically distributed random
      * variable with mean (2^opt_lg_prof_sample).
@@ -329,12 +330,13 @@ prof_ctx_get(const void *ptr)
     prof_ctx_t *ret;
     arena_chunk_t *chunk;
 
+    cassert(config_prof);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
+        assert(chunk->arena->magic == ARENA_MAGIC);
 
         ret = arena_prof_ctx_get(ptr);
     } else
@@ -348,12 +350,13 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 {
     arena_chunk_t *chunk;
 
+    cassert(config_prof);
     assert(ptr != NULL);
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr) {
         /* Region. */
-        dassert(chunk->arena->magic == ARENA_MAGIC);
+        assert(chunk->arena->magic == ARENA_MAGIC);
 
         arena_prof_ctx_set(ptr, ctx);
     } else
@@ -365,6 +368,7 @@ prof_sample_accum_update(size_t size)
 {
     prof_tdata_t *prof_tdata;
 
+    cassert(config_prof);
     /* Sampling logic is unnecessary if the interval is 1. */
     assert(opt_lg_prof_sample != 0);
 
@@ -391,6 +395,7 @@ JEMALLOC_INLINE void
 prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
 {
 
+    cassert(config_prof);
     assert(ptr != NULL);
     assert(size == isalloc(ptr));
 
@@ -437,6 +442,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
 {
     prof_thr_cnt_t *told_cnt;
 
+    cassert(config_prof);
     assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
 
     if (ptr != NULL) {
@@ -510,6 +516,8 @@ prof_free(const void *ptr, size_t size)
 {
     prof_ctx_t *ctx = prof_ctx_get(ptr);
 
+    cassert(config_prof);
+
     if ((uintptr_t)ctx > (uintptr_t)1) {
         assert(size == isalloc(ptr));
         prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
@@ -544,4 +552,3 @@ prof_free(const void *ptr, size_t size)
 
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
include/jemalloc/internal/stats.h

@@ -3,23 +3,16 @@
 
 #define UMAX2S_BUFSIZE 65
 
-#ifdef JEMALLOC_STATS
 typedef struct tcache_bin_stats_s tcache_bin_stats_t;
 typedef struct malloc_bin_stats_s malloc_bin_stats_t;
 typedef struct malloc_large_stats_s malloc_large_stats_t;
 typedef struct arena_stats_s arena_stats_t;
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 typedef struct chunk_stats_s chunk_stats_t;
-#endif
 
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS
 
-#ifdef JEMALLOC_STATS
-
-#ifdef JEMALLOC_TCACHE
 struct tcache_bin_stats_s {
     /*
      * Number of allocation requests that corresponded to the size of this
@@ -27,7 +20,6 @@ struct tcache_bin_stats_s {
      */
     uint64_t nrequests;
 };
-#endif
 
 struct malloc_bin_stats_s {
     /*
@@ -52,13 +44,11 @@ struct malloc_bin_stats_s {
      */
     uint64_t nrequests;
 
-#ifdef JEMALLOC_TCACHE
     /* Number of tcache fills from this bin. */
     uint64_t nfills;
 
     /* Number of tcache flushes to this bin. */
     uint64_t nflushes;
-#endif
 
     /* Total number of runs created for this bin's size class. */
     uint64_t nruns;
@@ -127,14 +117,10 @@ struct arena_stats_s {
      */
     malloc_large_stats_t *lstats;
 };
-#endif /* JEMALLOC_STATS */
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 struct chunk_stats_s {
-# ifdef JEMALLOC_STATS
     /* Number of chunks that were allocated. */
     uint64_t nchunks;
-# endif
 
     /* High-water mark for number of chunks allocated. */
     size_t highchunks;
@@ -146,7 +132,6 @@ struct chunk_stats_s {
      */
     size_t curchunks;
 };
-#endif /* JEMALLOC_STATS */
 
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
@@ -154,24 +139,19 @@ struct chunk_stats_s {
 
 extern bool opt_stats_print;
 
-#ifdef JEMALLOC_STATS
 extern size_t stats_cactive;
-#endif
 
 char *u2s(uint64_t x, unsigned base, char *s);
-#ifdef JEMALLOC_STATS
 void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
     const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
 void malloc_printf(const char *format, ...)
     JEMALLOC_ATTR(format(printf, 1, 2));
-#endif
 void stats_print(void (*write)(void *, const char *), void *cbopaque,
     const char *opts);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
-#ifdef JEMALLOC_STATS
 
 #ifndef JEMALLOC_ENABLE_INLINE
 size_t stats_cactive_get(void);
@@ -202,6 +182,5 @@ stats_cactive_sub(size_t size)
 }
 #endif
 
-#endif /* JEMALLOC_STATS */
 #endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
include/jemalloc/internal/tcache.h

@@ -42,9 +42,7 @@ struct tcache_bin_info_s {
 };
 
 struct tcache_bin_s {
-# ifdef JEMALLOC_STATS
     tcache_bin_stats_t tstats;
-# endif
     int low_water;          /* Min # cached since last GC. */
     unsigned lg_fill_div;   /* Fill (ncached_max >> lg_fill_div). */
     unsigned ncached;       /* # of cached objects. */
@@ -52,12 +50,8 @@ struct tcache_bin_s {
 };
 
 struct tcache_s {
-# ifdef JEMALLOC_STATS
     ql_elm(tcache_t) link;      /* Used for aggregating stats. */
-# endif
-# ifdef JEMALLOC_PROF
     uint64_t prof_accumbytes;   /* Cleared after arena_prof_accum() */
-# endif
     arena_t *arena;             /* This thread's arena. */
     unsigned ev_cnt;            /* Event count since incremental GC. */
     unsigned next_gc_bin;       /* Next bin to GC. */
@@ -109,23 +103,15 @@ extern size_t tcache_maxclass;
 /* Number of tcache allocation/deallocation events between incremental GCs. */
 extern unsigned tcache_gc_incr;
 
-void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
-void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    );
+void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
+void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache);
 tcache_t *tcache_create(arena_t *arena);
 void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
     size_t binind);
 void tcache_destroy(tcache_t *tcache);
-#ifdef JEMALLOC_STATS
 void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-#endif
 bool tcache_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
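The prototype change above is the signature-level version of the same
cleanup: instead of a parameter list that grows and shrinks with cpp,
tcache_t * is always passed and simply goes unused for stats/prof
purposes in stripped builds.  Every call site sheds the matching
conditional, as in this reduced before/after sketch (a fragment, not a
standalone unit):

    /* Before: each caller repeats the cpp contortion. */
    tcache_bin_flush_small(tbin, binind, rem
    #if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
        , tcache
    #endif
        );

    /* After: one unconditional signature, one unconditional call. */
    tcache_bin_flush_small(tbin, binind, rem, tcache);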
@ -195,19 +181,11 @@ tcache_event(tcache_t *tcache)
|
|||||||
if (binind < nbins) {
|
if (binind < nbins) {
|
||||||
tcache_bin_flush_small(tbin, binind,
|
tcache_bin_flush_small(tbin, binind,
|
||||||
tbin->ncached - tbin->low_water +
|
tbin->ncached - tbin->low_water +
|
||||||
(tbin->low_water >> 2)
|
(tbin->low_water >> 2), tcache);
|
||||||
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
|
|
||||||
, tcache
|
|
||||||
#endif
|
|
||||||
);
|
|
||||||
} else {
|
} else {
|
||||||
tcache_bin_flush_large(tbin, binind,
|
tcache_bin_flush_large(tbin, binind,
|
||||||
tbin->ncached - tbin->low_water +
|
tbin->ncached - tbin->low_water +
|
||||||
(tbin->low_water >> 2)
|
(tbin->low_water >> 2), tcache);
|
||||||
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
|
|
||||||
, tcache
|
|
||||||
#endif
|
|
||||||
);
|
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* Reduce fill count by 2X. Limit lg_fill_div such that
|
* Reduce fill count by 2X. Limit lg_fill_div such that
|
||||||
@ -268,21 +246,19 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
|
|||||||
assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
|
assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
|
||||||
|
|
||||||
if (zero == false) {
|
if (zero == false) {
|
||||||
#ifdef JEMALLOC_FILL
|
if (config_fill) {
|
||||||
if (opt_junk)
|
if (opt_junk)
|
||||||
memset(ret, 0xa5, size);
|
memset(ret, 0xa5, size);
|
||||||
else if (opt_zero)
|
else if (opt_zero)
|
||||||
memset(ret, 0, size);
|
memset(ret, 0, size);
|
||||||
#endif
|
}
|
||||||
} else
|
} else
|
||||||
memset(ret, 0, size);
|
memset(ret, 0, size);
|
||||||
|
|
||||||
#ifdef JEMALLOC_STATS
|
if (config_stats)
|
||||||
tbin->tstats.nrequests++;
|
tbin->tstats.nrequests++;
|
||||||
#endif
|
if (config_prof)
|
||||||
#ifdef JEMALLOC_PROF
|
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
|
||||||
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
|
|
||||||
#endif
|
|
||||||
tcache_event(tcache);
|
tcache_event(tcache);
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
@ -309,28 +285,28 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
|
|||||||
if (ret == NULL)
|
if (ret == NULL)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
} else {
|
} else {
|
||||||
#ifdef JEMALLOC_PROF
|
if (config_prof) {
|
||||||
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
|
arena_chunk_t *chunk =
|
||||||
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
|
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
|
||||||
PAGE_SHIFT);
|
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
|
||||||
chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
|
PAGE_SHIFT);
|
||||||
#endif
|
chunk->map[pageind-map_bias].bits &=
|
||||||
|
~CHUNK_MAP_CLASS_MASK;
|
||||||
|
}
|
||||||
if (zero == false) {
|
if (zero == false) {
|
||||||
#ifdef JEMALLOC_FILL
|
if (config_fill) {
|
||||||
if (opt_junk)
|
if (opt_junk)
|
||||||
memset(ret, 0xa5, size);
|
memset(ret, 0xa5, size);
|
||||||
else if (opt_zero)
|
else if (opt_zero)
|
||||||
memset(ret, 0, size);
|
memset(ret, 0, size);
|
||||||
#endif
|
}
|
||||||
} else
|
} else
|
||||||
memset(ret, 0, size);
|
memset(ret, 0, size);
|
||||||
|
|
||||||
#ifdef JEMALLOC_STATS
|
if (config_stats)
|
||||||
tbin->tstats.nrequests++;
|
tbin->tstats.nrequests++;
|
||||||
#endif
|
if (config_prof)
|
||||||
#ifdef JEMALLOC_PROF
|
tcache->prof_accumbytes += size;
|
||||||
tcache->prof_accumbytes += size;
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
tcache_event(tcache);
|
tcache_event(tcache);
|
||||||
@@ -357,26 +333,20 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
 	mapelm = &chunk->map[pageind-map_bias];
 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
 	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
-	dassert(run->magic == ARENA_RUN_MAGIC);
+	assert(run->magic == ARENA_RUN_MAGIC);
 	bin = run->bin;
 	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
 	    sizeof(arena_bin_t);
 	assert(binind < nbins);
 
-#ifdef JEMALLOC_FILL
-	if (opt_junk)
+	if (config_fill && opt_junk)
 		memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
-#endif
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (tbin->ncached == tbin_info->ncached_max) {
 		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
-		    1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		    1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
@@ -403,20 +373,14 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
 	binind = nbins + (size >> PAGE_SHIFT) - 1;
 
-#ifdef JEMALLOC_FILL
-	if (opt_junk)
+	if (config_fill && opt_junk)
 		memset(ptr, 0x5a, size);
-#endif
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (tbin->ncached == tbin_info->ncached_max) {
 		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
-		    1)
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		    1), tcache);
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
include/jemalloc/jemalloc_defs.h.in

@@ -48,9 +48,11 @@
 /* Defined if __attribute__((...)) syntax is supported. */
 #undef JEMALLOC_HAVE_ATTR
 #ifdef JEMALLOC_HAVE_ATTR
-#  define JEMALLOC_ATTR(s) __attribute__((s))
+#  define JEMALLOC_CATTR(s, a) __attribute__((s))
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
 #else
-#  define JEMALLOC_ATTR(s)
+#  define JEMALLOC_CATTR(s, a) a
+#  define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
 #endif
 
 /* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
src/arena.c (734 lines changed): diff suppressed because it is too large.
src/chunk.c (104 lines changed)
@@ -5,18 +5,12 @@
 /* Data. */
 
 size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-#ifdef JEMALLOC_SWAP
 bool opt_overcommit = true;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 malloc_mutex_t chunks_mtx;
 chunk_stats_t stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 rtree_t *chunks_rtree;
-#endif
 
 /* Various chunk-related settings. */
 size_t chunksize;
@@ -41,67 +35,50 @@ chunk_alloc(size_t size, bool base, bool *zero)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-#ifdef JEMALLOC_SWAP
-	if (swap_enabled) {
+	if (config_swap && swap_enabled) {
 		ret = chunk_alloc_swap(size, zero);
 		if (ret != NULL)
 			goto RETURN;
 	}
 
 	if (swap_enabled == false || opt_overcommit) {
-#endif
-#ifdef JEMALLOC_DSS
-		ret = chunk_alloc_dss(size, zero);
-		if (ret != NULL)
-			goto RETURN;
-#endif
+		if (config_dss) {
+			ret = chunk_alloc_dss(size, zero);
+			if (ret != NULL)
+				goto RETURN;
+		}
 		ret = chunk_alloc_mmap(size);
 		if (ret != NULL) {
 			*zero = true;
 			goto RETURN;
 		}
-#ifdef JEMALLOC_SWAP
 	}
-#endif
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
 RETURN:
-#ifdef JEMALLOC_IVSALLOC
-	if (base == false && ret != NULL) {
+	if (config_ivsalloc && base == false && ret != NULL) {
 		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
 			chunk_dealloc(ret, size, true);
 			return (NULL);
 		}
 	}
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	if (ret != NULL) {
-#  ifdef JEMALLOC_PROF
+	if ((config_stats || config_prof) && ret != NULL) {
 		bool gdump;
-#  endif
 		malloc_mutex_lock(&chunks_mtx);
-#  ifdef JEMALLOC_STATS
+		if (config_stats)
 			stats_chunks.nchunks += (size / chunksize);
-#  endif
 		stats_chunks.curchunks += (size / chunksize);
 		if (stats_chunks.curchunks > stats_chunks.highchunks) {
 			stats_chunks.highchunks = stats_chunks.curchunks;
-#  ifdef JEMALLOC_PROF
+			if (config_prof)
 				gdump = true;
-#  endif
-		}
-#  ifdef JEMALLOC_PROF
-		else
+		} else if (config_prof)
 			gdump = false;
-#  endif
 		malloc_mutex_unlock(&chunks_mtx);
-#  ifdef JEMALLOC_PROF
-		if (opt_prof && opt_prof_gdump && gdump)
+		if (config_prof && opt_prof && opt_prof_gdump && gdump)
 			prof_gdump();
-#  endif
 	}
-#endif
 
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
@@ -116,24 +93,20 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-#ifdef JEMALLOC_IVSALLOC
-	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	malloc_mutex_lock(&chunks_mtx);
-	stats_chunks.curchunks -= (size / chunksize);
-	malloc_mutex_unlock(&chunks_mtx);
-#endif
+	if (config_ivsalloc)
+		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+	if (config_stats || config_prof) {
+		malloc_mutex_lock(&chunks_mtx);
+		stats_chunks.curchunks -= (size / chunksize);
+		malloc_mutex_unlock(&chunks_mtx);
+	}
 
 	if (unmap) {
-#ifdef JEMALLOC_SWAP
-		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
+		if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
+		    size) == false)
 			return;
-#endif
-#ifdef JEMALLOC_DSS
-		if (chunk_dealloc_dss(chunk, size) == false)
+		if (config_dss && chunk_dealloc_dss(chunk, size) == false)
 			return;
-#endif
 		chunk_dealloc_mmap(chunk, size);
 	}
 }
@@ -148,26 +121,23 @@ chunk_boot(void)
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> PAGE_SHIFT);
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	if (malloc_mutex_init(&chunks_mtx))
-		return (true);
-	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-#ifdef JEMALLOC_SWAP
-	if (chunk_swap_boot())
+	if (config_stats || config_prof) {
+		if (malloc_mutex_init(&chunks_mtx))
+			return (true);
+		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+	}
+	if (config_swap && chunk_swap_boot())
 		return (true);
-#endif
 	if (chunk_mmap_boot())
 		return (true);
-#ifdef JEMALLOC_DSS
-	if (chunk_dss_boot())
+	if (config_dss && chunk_dss_boot())
 		return (true);
-#endif
-#ifdef JEMALLOC_IVSALLOC
-	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
-	if (chunks_rtree == NULL)
-		return (true);
-#endif
+	if (config_ivsalloc) {
+		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+		    opt_lg_chunk);
+		if (chunks_rtree == NULL)
+			return (true);
+	}
 
 	return (false);
 }
src/chunk_dss.c

@@ -1,6 +1,5 @@
 #define JEMALLOC_CHUNK_DSS_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 /* Data. */
 
@@ -35,6 +34,8 @@ chunk_recycle_dss(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
 
+	cassert(config_dss);
+
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&dss_mtx);
@@ -74,6 +75,8 @@ chunk_alloc_dss(size_t size, bool *zero)
 {
 	void *ret;
 
+	cassert(config_dss);
+
 	ret = chunk_recycle_dss(size, zero);
 	if (ret != NULL)
 		return (ret);
@@ -131,6 +134,8 @@ chunk_dealloc_dss_record(void *chunk, size_t size)
 {
 	extent_node_t *xnode, *node, *prev, key;
 
+	cassert(config_dss);
+
 	xnode = NULL;
 	while (true) {
 		key.addr = (void *)((uintptr_t)chunk + size);
@@ -204,6 +209,8 @@ chunk_in_dss(void *chunk)
 {
 	bool ret;
 
+	cassert(config_dss);
+
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max)
@@ -220,6 +227,8 @@ chunk_dealloc_dss(void *chunk, size_t size)
 {
 	bool ret;
 
+	cassert(config_dss);
+
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
@@ -269,6 +278,8 @@ bool
 chunk_dss_boot(void)
 {
 
+	cassert(config_dss);
+
 	if (malloc_mutex_init(&dss_mtx))
 		return (true);
 	dss_base = sbrk(0);
@@ -281,4 +292,3 @@ chunk_dss_boot(void)
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */
src/chunk_swap.c

@@ -1,6 +1,5 @@
 #define JEMALLOC_CHUNK_SWAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SWAP
 /******************************************************************************/
 /* Data. */
 
@@ -9,9 +9,7 @@ bool swap_enabled;
 bool swap_prezeroed;
 size_t swap_nfds;
 int *swap_fds;
-#ifdef JEMALLOC_STATS
 size_t swap_avail;
-#endif
 
 /* Base address of the mmap()ed file(s). */
 static void *swap_base;
@@ -42,6 +40,8 @@ chunk_recycle_swap(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
 
+	cassert(config_swap);
+
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&swap_mtx);
@@ -65,9 +65,8 @@ chunk_recycle_swap(size_t size, bool *zero)
 			node->size -= size;
 			extent_tree_szad_insert(&swap_chunks_szad, node);
 		}
-#ifdef JEMALLOC_STATS
-		swap_avail -= size;
-#endif
+		if (config_stats)
+			swap_avail -= size;
 		malloc_mutex_unlock(&swap_mtx);
 
 		if (*zero)
@@ -84,6 +83,7 @@ chunk_alloc_swap(size_t size, bool *zero)
 {
 	void *ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	ret = chunk_recycle_swap(size, zero);
@@ -94,9 +94,8 @@ chunk_alloc_swap(size_t size, bool *zero)
 	if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
 		ret = swap_end;
 		swap_end = (void *)((uintptr_t)swap_end + size);
-#ifdef JEMALLOC_STATS
-		swap_avail -= size;
-#endif
+		if (config_stats)
+			swap_avail -= size;
 		malloc_mutex_unlock(&swap_mtx);
 
 		if (swap_prezeroed)
@@ -116,6 +115,8 @@ chunk_dealloc_swap_record(void *chunk, size_t size)
 {
 	extent_node_t *xnode, *node, *prev, key;
 
+	cassert(config_swap);
+
 	xnode = NULL;
 	while (true) {
 		key.addr = (void *)((uintptr_t)chunk + size);
@@ -189,6 +190,7 @@ chunk_in_swap(void *chunk)
 {
 	bool ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	malloc_mutex_lock(&swap_mtx);
@@ -207,6 +209,7 @@ chunk_dealloc_swap(void *chunk, size_t size)
 {
 	bool ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	malloc_mutex_lock(&swap_mtx);
@@ -237,9 +240,8 @@ chunk_dealloc_swap(void *chunk, size_t size)
 		} else
 			madvise(chunk, size, MADV_DONTNEED);
 
-#ifdef JEMALLOC_STATS
-		swap_avail += size;
-#endif
+		if (config_stats)
+			swap_avail += size;
 		ret = false;
 		goto RETURN;
 	}
@@ -260,6 +262,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	size_t cumsize, voff;
 	size_t sizes[nfds];
 
+	cassert(config_swap);
+
 	malloc_mutex_lock(&swap_mtx);
 
 	/* Get file sizes. */
@@ -362,9 +366,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	memcpy(swap_fds, fds, nfds * sizeof(int));
 	swap_nfds = nfds;
 
-#ifdef JEMALLOC_STATS
-	swap_avail = cumsize;
-#endif
+	if (config_stats)
+		swap_avail = cumsize;
 
 	swap_enabled = true;
 
@@ -378,6 +381,8 @@ bool
 chunk_swap_boot(void)
 {
 
+	cassert(config_swap);
+
 	if (malloc_mutex_init(&swap_mtx))
 		return (true);
 
@@ -385,9 +390,8 @@ chunk_swap_boot(void)
 	swap_prezeroed = false; /* swap.* mallctl's depend on this. */
 	swap_nfds = 0;
 	swap_fds = NULL;
-#ifdef JEMALLOC_STATS
-	swap_avail = 0;
-#endif
+	if (config_stats)
+		swap_avail = 0;
 	swap_base = NULL;
 	swap_end = NULL;
 	swap_max = NULL;
@@ -397,6 +401,3 @@ chunk_swap_boot(void)
 
 	return (false);
 }
-
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
src/ckh.c (17 lines changed)
@@ -73,7 +73,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
 	size_t hash1, hash2, bucket, cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
 
@@ -394,9 +394,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 		goto RETURN;
 	}
 
-#ifdef JEMALLOC_DEBUG
-	ckh->magic = CKH_MAGIC;
-#endif
+	if (config_debug)
+		ckh->magic = CKH_MAGIC;
 
 	ret = false;
 RETURN:
@@ -408,7 +407,7 @@ ckh_delete(ckh_t *ckh)
 {
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 #ifdef CKH_VERBOSE
 	malloc_printf(
@@ -433,7 +432,7 @@ ckh_count(ckh_t *ckh)
 {
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	return (ckh->count);
 }
@@ -464,7 +463,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
 	bool ret;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 	assert(ckh_search(ckh, key, NULL, NULL));
 
 #ifdef CKH_COUNT
@@ -489,7 +488,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	size_t cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	cell = ckh_isearch(ckh, searchkey);
 	if (cell != SIZE_T_MAX) {
@@ -521,7 +520,7 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	size_t cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	cell = ckh_isearch(ckh, searchkey);
 	if (cell != SIZE_T_MAX) {
src/extent.c

@@ -3,7 +3,6 @@
 
 /******************************************************************************/
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 static inline int
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
@@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
     extent_szad_comp)
-#endif
 
 static inline int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
src/huge.c (80 lines changed)
@@ -4,11 +4,9 @@
 /******************************************************************************/
 /* Data. */
 
-#ifdef JEMALLOC_STATS
 uint64_t huge_nmalloc;
 uint64_t huge_ndalloc;
 size_t huge_allocated;
-#endif
 
 malloc_mutex_t huge_mtx;
 
@@ -49,21 +47,19 @@ huge_malloc(size_t size, bool zero)
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-	stats_cactive_add(csize);
-	huge_nmalloc++;
-	huge_allocated += csize;
-#endif
+	if (config_stats) {
+		stats_cactive_add(csize);
+		huge_nmalloc++;
+		huge_allocated += csize;
+	}
 	malloc_mutex_unlock(&huge_mtx);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
 		else if (opt_zero)
 			memset(ret, 0, csize);
 	}
-#endif
 
 	return (ret);
 }
@@ -134,21 +130,19 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-	stats_cactive_add(chunk_size);
-	huge_nmalloc++;
-	huge_allocated += chunk_size;
-#endif
+	if (config_stats) {
+		stats_cactive_add(chunk_size);
+		huge_nmalloc++;
+		huge_allocated += chunk_size;
+	}
 	malloc_mutex_unlock(&huge_mtx);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, chunk_size);
 		else if (opt_zero)
 			memset(ret, 0, chunk_size);
 	}
-#endif
 
 	return (ret);
 }
@@ -164,12 +158,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
 	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
 		assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
-		if (opt_junk && size < oldsize) {
+		if (config_fill && opt_junk && size < oldsize) {
 			memset((void *)((uintptr_t)ptr + size), 0x5a,
 			    oldsize - size);
 		}
-#endif
 		return (ptr);
 	}
 
@@ -223,15 +215,10 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * source nor the destination are in swap or dss.
 	 */
 #ifdef JEMALLOC_MREMAP_FIXED
-	if (oldsize >= chunksize
-#  ifdef JEMALLOC_SWAP
-	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
-	    chunk_in_swap(ret) == false))
-#  endif
-#  ifdef JEMALLOC_DSS
-	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-#  endif
-	    ) {
+	if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
+	    false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
+	    false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
+	    chunk_in_dss(ret) == false))) {
 		size_t newsize = huge_salloc(ret);
 
 		/*
@@ -285,23 +272,16 @@ huge_dalloc(void *ptr, bool unmap)
 	assert(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);
 
-#ifdef JEMALLOC_STATS
-	stats_cactive_sub(node->size);
-	huge_ndalloc++;
-	huge_allocated -= node->size;
-#endif
+	if (config_stats) {
+		stats_cactive_sub(node->size);
+		huge_ndalloc++;
+		huge_allocated -= node->size;
+	}
 
 	malloc_mutex_unlock(&huge_mtx);
 
-	if (unmap) {
-	/* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-		if (opt_junk)
-			memset(node->addr, 0x5a, node->size);
-#endif
-#endif
-	}
+	if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
+		memset(node->addr, 0x5a, node->size);
 
 	chunk_dealloc(node->addr, node->size, unmap);
 
@@ -328,7 +308,6 @@ huge_salloc(const void *ptr)
 	return (ret);
 }
 
-#ifdef JEMALLOC_PROF
 prof_ctx_t *
 huge_prof_ctx_get(const void *ptr)
 {
@@ -365,7 +344,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 
 	malloc_mutex_unlock(&huge_mtx);
 }
-#endif
 
 bool
 huge_boot(void)
@@ -376,11 +354,11 @@ huge_boot(void)
 		return (true);
 	extent_tree_ad_new(&huge);
 
-#ifdef JEMALLOC_STATS
-	huge_nmalloc = 0;
-	huge_ndalloc = 0;
-	huge_allocated = 0;
-#endif
+	if (config_stats) {
+		huge_nmalloc = 0;
+		huge_ndalloc = 0;
+		huge_allocated = 0;
+	}
 
 	return (false);
 }
src/jemalloc.c (606 lines changed): diff suppressed because it is too large.
src/prof.c (75 lines changed)
@@ -1,6 +1,5 @@
 #define JEMALLOC_PROF_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 
 #ifdef JEMALLOC_PROF_LIBUNWIND
@@ -102,6 +101,8 @@ void
 bt_init(prof_bt_t *bt, void **vec)
 {
 
+	cassert(config_prof);
+
 	bt->vec = vec;
 	bt->len = 0;
 }
@@ -110,6 +111,8 @@ static void
 bt_destroy(prof_bt_t *bt)
 {
 
+	cassert(config_prof);
+
 	idalloc(bt);
 }
 
@@ -118,6 +121,8 @@ bt_dup(prof_bt_t *bt)
 {
 	prof_bt_t *ret;
 
+	cassert(config_prof);
+
 	/*
 	 * Create a single allocation that has space for vec immediately
 	 * following the prof_bt_t structure.  The backtraces that get
@@ -141,6 +146,8 @@ static inline void
 prof_enter(void)
 {
 
+	cassert(config_prof);
+
 	malloc_mutex_lock(&enq_mtx);
 	enq = true;
 	malloc_mutex_unlock(&enq_mtx);
@@ -153,6 +160,8 @@ prof_leave(void)
 {
 	bool idump, gdump;
 
+	cassert(config_prof);
+
 	malloc_mutex_unlock(&bt2ctx_mtx);
 
 	malloc_mutex_lock(&enq_mtx);
@@ -178,6 +187,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	unsigned i;
 	int err;
 
+	cassert(config_prof);
 	assert(bt->len == 0);
 	assert(bt->vec != NULL);
 	assert(max <= (1U << opt_lg_prof_bt_max));
@@ -204,12 +214,13 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 			break;
 		}
 	}
-#endif
-#ifdef JEMALLOC_PROF_LIBGCC
+#elif (defined(JEMALLOC_PROF_LIBGCC))
 static _Unwind_Reason_Code
 prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
 {
 
+	cassert(config_prof);
+
 	return (_URC_NO_REASON);
 }
 
@@ -218,6 +229,8 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg)
 {
 	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
 
+	cassert(config_prof);
+
 	if (data->nignore > 0)
 		data->nignore--;
 	else {
@@ -235,10 +248,11 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 {
 	prof_unwind_data_t data = {bt, nignore, max};
 
+	cassert(config_prof);
+
 	_Unwind_Backtrace(prof_unwind_callback, &data);
 }
-#endif
-#ifdef JEMALLOC_PROF_GCC
+#elif (defined(JEMALLOC_PROF_GCC))
 void
 prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 {
@@ -257,6 +271,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	} else \
 		return;
 
+	cassert(config_prof);
 	assert(nignore <= 3);
 	assert(max <= (1U << opt_lg_prof_bt_max));
 
@@ -407,6 +422,14 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	BT_FRAME(130)
 #undef BT_FRAME
 }
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+{
+
+	cassert(config_prof);
+	assert(false);
+}
 #endif
 
 prof_thr_cnt_t *
@@ -418,6 +441,8 @@ prof_lookup(prof_bt_t *bt)
 	} ret;
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
+
 	prof_tdata = PROF_TCACHE_GET();
 	if (prof_tdata == NULL) {
 		prof_tdata = prof_tdata_init();
@@ -553,6 +578,8 @@ prof_flush(bool propagate_err)
 	bool ret = false;
 	ssize_t err;
 
+	cassert(config_prof);
+
 	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
 	if (err == -1) {
 		if (propagate_err == false) {
@@ -573,6 +600,8 @@ prof_write(const char *s, bool propagate_err)
 {
 	unsigned i, slen, n;
 
+	cassert(config_prof);
+
 	i = 0;
 	slen = strlen(s);
 	while (i < slen) {
@@ -602,6 +631,8 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
 	prof_thr_cnt_t *thr_cnt;
 	prof_cnt_t tcnt;
 
+	cassert(config_prof);
+
 	malloc_mutex_lock(&ctx->lock);
 
 	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
@@ -648,6 +679,8 @@ static void
 prof_ctx_destroy(prof_ctx_t *ctx)
 {
 
+	cassert(config_prof);
+
 	/*
 	 * Check that ctx is still unused by any thread cache before destroying
 	 * it.  prof_lookup() artificially raises ctx->cnt_merge.curobjs in
@@ -686,6 +719,8 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
 {
 	bool destroy;
 
+	cassert(config_prof);
+
 	/* Merge cnt stats and detach from ctx. */
 	malloc_mutex_lock(&ctx->lock);
 	ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
@@ -723,6 +758,8 @@ prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
 	char buf[UMAX2S_BUFSIZE];
 	unsigned i;
 
+	cassert(config_prof);
+
 	if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
 		assert(ctx->cnt_summed.curbytes == 0);
 		assert(ctx->cnt_summed.accumobjs == 0);
@@ -767,6 +804,8 @@ prof_dump_maps(bool propagate_err)
 	char mpath[6 + UMAX2S_BUFSIZE
 	    + 5 + 1];
 
+	cassert(config_prof);
+
 	i = 0;
 
 	s = "/proc/";
@@ -827,6 +866,8 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
 	char buf[UMAX2S_BUFSIZE];
 	size_t leak_nctx;
 
+	cassert(config_prof);
+
 	prof_enter();
 	prof_dump_fd = creat(filename, 0644);
 	if (prof_dump_fd == -1) {
@@ -917,6 +958,8 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
 	char *s;
 	unsigned i, slen;
 
+	cassert(config_prof);
+
 	/*
 	 * Construct a filename of the form:
 	 *
@@ -979,6 +1022,8 @@ prof_fdump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 
@@ -995,6 +1040,8 @@ prof_idump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 	malloc_mutex_lock(&enq_mtx);
@@ -1019,6 +1066,8 @@ prof_mdump(const char *filename)
 {
 	char filename_buf[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (opt_prof == false || prof_booted == false)
 		return (true);
 
@@ -1040,6 +1089,8 @@ prof_gdump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 	malloc_mutex_lock(&enq_mtx);
@@ -1066,6 +1117,7 @@ prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
 	uint64_t h;
 	prof_bt_t *bt = (prof_bt_t *)key;
 
+	cassert(config_prof);
 	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
 	assert(hash1 != NULL);
 	assert(hash2 != NULL);
@@ -1094,6 +1146,8 @@ prof_bt_keycomp(const void *k1, const void *k2)
 	const prof_bt_t *bt1 = (prof_bt_t *)k1;
 	const prof_bt_t *bt2 = (prof_bt_t *)k2;
 
+	cassert(config_prof);
+
 	if (bt1->len != bt2->len)
 		return (false);
 	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
@@ -1104,6 +1158,8 @@ prof_tdata_init(void)
 {
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
+
 	/* Initialize an empty cache for this thread. */
 	prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
 	if (prof_tdata == NULL)
@@ -1138,6 +1194,8 @@ prof_tdata_cleanup(void *arg)
 	prof_thr_cnt_t *cnt;
 	prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
 
+	cassert(config_prof);
+
 	/*
 	 * Delete the hash table.  All of its contents can still be iterated
 	 * over via the LRU.
@@ -1161,6 +1219,8 @@ void
 prof_boot0(void)
 {
 
+	cassert(config_prof);
+
 	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
 	    sizeof(PROF_PREFIX_DEFAULT));
 }
@@ -1169,6 +1229,8 @@ void
 prof_boot1(void)
 {
 
+	cassert(config_prof);
+
 	/*
 	 * opt_prof and prof_promote must be in their final state before any
 	 * arenas are initialized, so this function must be executed early.
@@ -1197,6 +1259,8 @@ bool
 prof_boot2(void)
 {
 
+	cassert(config_prof);
+
 	if (opt_prof) {
 		if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
 		    prof_bt_keycomp))
@@ -1241,4 +1305,3 @@ prof_boot2(void)
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
src/stats.c (13 lines changed)
@@ -39,14 +39,11 @@
 
 bool opt_stats_print = false;
 
-#ifdef JEMALLOC_STATS
 size_t stats_cactive = 0;
-#endif
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-#ifdef JEMALLOC_STATS
 static void	malloc_vcprintf(void (*write_cb)(void *, const char *),
     void *cbopaque, const char *format, va_list ap);
 static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
@@ -55,10 +52,10 @@ static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
 static void	stats_arena_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
-#endif
 
 /******************************************************************************/
 
+/* XXX Refactor by adding malloc_vsnprintf(). */
 /*
  * We don't want to depend on vsnprintf() for production builds, since that can
  * cause unnecessary bloat for static binaries.  u2s() provides minimal integer
@@ -99,7 +96,6 @@ u2s(uint64_t x, unsigned base, char *s)
 	return (&s[i]);
 }
 
-#ifdef JEMALLOC_STATS
 static void
 malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, va_list ap)
@@ -149,9 +145,7 @@ malloc_printf(const char *format, ...)
 	malloc_vcprintf(NULL, NULL, format, ap);
 	va_end(ap);
 }
-#endif
 
-#ifdef JEMALLOC_STATS
 static void
 stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
@@ -377,7 +371,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	stats_arena_bins_print(write_cb, cbopaque, i);
 	stats_arena_lruns_print(write_cb, cbopaque, i);
 }
-#endif
 
 void
 stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@@ -674,8 +667,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		write_cb(cbopaque, ")\n");
 	}
 
-#ifdef JEMALLOC_STATS
-	{
+	if (config_stats) {
 		int err;
 		size_t sszp, ssz;
 		size_t *cactive;
@@ -785,6 +777,5 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			}
 		}
 	}
-#endif /* #ifdef JEMALLOC_STATS */
 	write_cb(cbopaque, "--- End jemalloc statistics ---\n");
 }
src/tcache.c (130 lines changed)
@@ -38,31 +38,22 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
 {
 	void *ret;
 
-	arena_tcache_fill_small(tcache->arena, tbin, binind
-#ifdef JEMALLOC_PROF
-	    , tcache->prof_accumbytes
-#endif
-	    );
-#ifdef JEMALLOC_PROF
-	tcache->prof_accumbytes = 0;
-#endif
+	arena_tcache_fill_small(tcache->arena, tbin, binind,
+	    config_prof ? tcache->prof_accumbytes : 0);
+	if (config_prof)
+		tcache->prof_accumbytes = 0;
 	ret = tcache_alloc_easy(tbin);
 
 	return (ret);
 }
 
 void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif
 
 	assert(binind < nbins);
 	assert(rem <= tbin->ncached);
@@ -74,25 +65,21 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_t *arena = chunk->arena;
 		arena_bin_t *bin = &arena->bins[binind];
 
-#ifdef JEMALLOC_PROF
-		if (arena == tcache->arena) {
+		if (config_prof && arena == tcache->arena) {
 			malloc_mutex_lock(&arena->lock);
 			arena_prof_accum(arena, tcache->prof_accumbytes);
 			malloc_mutex_unlock(&arena->lock);
 			tcache->prof_accumbytes = 0;
 		}
-#endif
 
 		malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
-		if (arena == tcache->arena) {
+		if (config_stats && arena == tcache->arena) {
 			assert(merged_stats == false);
 			merged_stats = true;
 			bin->stats.nflushes++;
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			tbin->tstats.nrequests = 0;
 		}
-#endif
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -117,8 +104,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		}
 		malloc_mutex_unlock(&bin->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -130,7 +116,6 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&bin->lock);
 	}
-#endif
 
 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -140,17 +125,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 }
 
 void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif
 
 	assert(binind < nhbins);
 	assert(rem <= tbin->ncached);
@@ -162,23 +142,21 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_t *arena = chunk->arena;
 
 		malloc_mutex_lock(&arena->lock);
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		if (arena == tcache->arena) {
-#endif
-#ifdef JEMALLOC_PROF
-			arena_prof_accum(arena, tcache->prof_accumbytes);
-			tcache->prof_accumbytes = 0;
-#endif
-#ifdef JEMALLOC_STATS
-			merged_stats = true;
-			arena->stats.nrequests_large += tbin->tstats.nrequests;
+		if ((config_prof || config_stats) && arena == tcache->arena) {
+			if (config_prof) {
+				arena_prof_accum(arena,
+				    tcache->prof_accumbytes);
+				tcache->prof_accumbytes = 0;
+			}
+			if (config_stats) {
+				merged_stats = true;
+				arena->stats.nrequests_large +=
+				    tbin->tstats.nrequests;
 				arena->stats.lstats[binind - nbins].nrequests +=
 				    tbin->tstats.nrequests;
 				tbin->tstats.nrequests = 0;
-#endif
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
+			}
 		}
-#endif
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -199,8 +177,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		}
 		malloc_mutex_unlock(&arena->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -213,7 +190,6 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&arena->lock);
 	}
-#endif
 
 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -254,13 +230,13 @@ tcache_create(arena_t *arena)
 	if (tcache == NULL)
 		return (NULL);
 
-#ifdef JEMALLOC_STATS
-	/* Link into list of extant tcaches. */
-	malloc_mutex_lock(&arena->lock);
-	ql_elm_new(tcache, link);
-	ql_tail_insert(&arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&arena->lock);
-#endif
+	if (config_stats) {
+		/* Link into list of extant tcaches. */
+		malloc_mutex_lock(&arena->lock);
+		ql_elm_new(tcache, link);
+		ql_tail_insert(&arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&arena->lock);
+	}
 
 	tcache->arena = arena;
 	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
@@ -282,43 +258,32 @@ tcache_destroy(tcache_t *tcache)
 	unsigned i;
 	size_t tcache_size;
 
-#ifdef JEMALLOC_STATS
-	/* Unlink from list of extant tcaches. */
-	malloc_mutex_lock(&tcache->arena->lock);
-	ql_remove(&tcache->arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&tcache->arena->lock);
-	tcache_stats_merge(tcache, tcache->arena);
-#endif
+	if (config_stats) {
+		/* Unlink from list of extant tcaches. */
+		malloc_mutex_lock(&tcache->arena->lock);
+		ql_remove(&tcache->arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&tcache->arena->lock);
+		tcache_stats_merge(tcache, tcache->arena);
+	}
 
 	for (i = 0; i < nbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_small(tbin, i, 0, tcache);
 
-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			arena_bin_t *bin = &arena->bins[i];
 			malloc_mutex_lock(&bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			malloc_mutex_unlock(&bin->lock);
 		}
-#endif
 	}
 
 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_large(tbin, i, 0, tcache);
 
-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
@@ -326,16 +291,13 @@ tcache_destroy(tcache_t *tcache)
 			    tbin->tstats.nrequests;
 			malloc_mutex_unlock(&arena->lock);
 		}
-#endif
 	}
 
-#ifdef JEMALLOC_PROF
-	if (tcache->prof_accumbytes > 0) {
+	if (config_prof && tcache->prof_accumbytes > 0) {
 		malloc_mutex_lock(&tcache->arena->lock);
 		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
 		malloc_mutex_unlock(&tcache->arena->lock);
 	}
-#endif
 
 	tcache_size = arena_salloc(tcache);
 	if (tcache_size <= small_maxclass) {
@@ -389,7 +351,6 @@ tcache_thread_cleanup(void *arg)
 	}
 }
 
-#ifdef JEMALLOC_STATS
 void
 tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 {
@@ -413,7 +374,6 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 		tbin->tstats.nrequests = 0;
 	}
 }
-#endif
 
 bool
 tcache_boot(void)