Reduce cpp conditional logic complexity.

Convert configuration-related cpp conditional logic to use static
constant variables, e.g.:

  #ifdef JEMALLOC_DEBUG
    [...]
  #endif

becomes:

  if (config_debug) {
    [...]
  }

The advantage is clearer, more concise code.  The main disadvantage is
that data structures no longer have conditionally defined fields, so
they pay the cost of all fields regardless of whether they are used.  In
practice, this is only a minor concern; config_stats will go away in an
upcoming change, and config_prof is the only other major feature that
depends on more than a few special-purpose fields.
Jason Evans 2012-02-10 20:22:09 -08:00
parent b3bd885090
commit 7372b15a31
27 changed files with 1194 additions and 1725 deletions
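What makes this conversion safe is that each feature flag becomes a
compile-time constant: jemalloc_internal.h.in (below) defines one static
const bool per cpp symbol, so an optimizing compiler deletes disabled
branches outright while still parsing and type-checking them. A minimal
stand-alone sketch of the idiom, assuming a JEMALLOC_DEBUG-style feature
macro:

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Defined once, from the same cpp symbol the old #ifdefs tested. */
  static const bool config_debug =
  #ifdef JEMALLOC_DEBUG
      true
  #else
      false
  #endif
      ;

  /* Use to assert a particular configuration, e.g., cassert(config_debug). */
  #define cassert(c) do {    \
      if ((c) == false)      \
          assert(false);     \
  } while (0)

  int
  main(void)
  {
      /*
       * The branch compiles away when JEMALLOC_DEBUG is not defined,
       * but its body is still type-checked, unlike an #ifdef block.
       */
      if (config_debug)
          fprintf(stderr, "debug build\n");
      cassert(true);
      return (0);
  }

The disadvantage called out above, always-present struct fields, is
worked around in the one hot spot where it matters; see the note on the
prof_ctx union overlay after the arena.h diff below.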

configure.ac

@ -174,6 +174,9 @@ AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
LD_PRELOAD_VAR="LD_PRELOAD"
so="so"
dnl Heap profiling uses the log(3) function.
LIBS="$LIBS -lm"
dnl Platform-specific settings. abi and RPATH can probably be determined
dnl programmatically, but doing so is error-prone, which makes it generally
dnl not worth the trouble.
@ -553,7 +556,6 @@ fi
AC_MSG_CHECKING([configured backtracing method])
AC_MSG_RESULT([$backtrace_method])
if test "x$enable_prof" = "x1" ; then
LIBS="$LIBS -lm"
AC_DEFINE([JEMALLOC_PROF], [ ])
fi
AC_SUBST([enable_prof])

include/jemalloc/internal/arena.h

@ -16,11 +16,9 @@
#define SUBPAGE_CEILING(s) \
(((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK)
#ifdef JEMALLOC_TINY
/* Smallest size class to support. */
# define LG_TINY_MIN LG_SIZEOF_PTR
# define TINY_MIN (1U << LG_TINY_MIN)
#endif
/* Smallest size class to support. */
#define LG_TINY_MIN LG_SIZEOF_PTR
#define TINY_MIN (1U << LG_TINY_MIN)
/*
* Maximum size class that is a multiple of the quantum, but not (necessarily)
@ -85,6 +83,15 @@ typedef struct arena_s arena_t;
/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
/*
* Overlay prof_ctx in order to allow it to be referenced by dead code.
* Such antics aren't warranted for per arena data structures, but
* chunk map overhead accounts for a percentage of memory, rather than
* being just a fixed cost.
*/
union {
#endif
union {
/*
* Linkage for run trees. There are two disjoint uses:
@ -103,9 +110,10 @@ struct arena_chunk_map_s {
ql_elm(arena_chunk_map_t) ql_link;
} u;
#ifdef JEMALLOC_PROF
/* Profile counters, used for large object runs. */
prof_ctx_t *prof_ctx;
#ifndef JEMALLOC_PROF
}; /* union { ... }; */
#endif
/*
@ -162,10 +170,8 @@ struct arena_chunk_map_s {
* ssssssss ssssssss ssss---- ----D-LA
*/
size_t bits;
#ifdef JEMALLOC_PROF
#define CHUNK_MAP_CLASS_SHIFT 4
#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U)
#endif
#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU)
#define CHUNK_MAP_DIRTY ((size_t)0x8U)
#define CHUNK_MAP_UNZEROED ((size_t)0x4U)
@ -205,10 +211,8 @@ struct arena_chunk_s {
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
struct arena_run_s {
#ifdef JEMALLOC_DEBUG
uint32_t magic;
# define ARENA_RUN_MAGIC 0x384adf93
#endif
/* Bin this run is associated with. */
arena_bin_t *bin;
@ -247,13 +251,11 @@ struct arena_bin_info_s {
*/
bitmap_info_t bitmap_info;
#ifdef JEMALLOC_PROF
/*
* Offset of first (prof_ctx_t *) in a run header for this bin's size
* class, or 0 if (opt_prof == false).
* class, or 0 if (config_prof == false || opt_prof == false).
*/
uint32_t ctx0_offset;
#endif
/* Offset of first region in a run for this bin's size class. */
uint32_t reg0_offset;
@ -283,17 +285,13 @@ struct arena_bin_s {
*/
arena_run_tree_t runs;
#ifdef JEMALLOC_STATS
/* Bin statistics. */
malloc_bin_stats_t stats;
#endif
};
struct arena_s {
#ifdef JEMALLOC_DEBUG
uint32_t magic;
# define ARENA_MAGIC 0x947d3d24
#endif
/* This arena's index within the arenas array. */
unsigned ind;
@ -314,20 +312,14 @@ struct arena_s {
*/
malloc_mutex_t lock;
#ifdef JEMALLOC_STATS
arena_stats_t stats;
# ifdef JEMALLOC_TCACHE
/*
* List of tcaches for extant threads associated with this arena.
* Stats from these are merged incrementally, and at exit.
*/
ql_head(tcache_t) tcache_ql;
# endif
#endif
#ifdef JEMALLOC_PROF
uint64_t prof_accumbytes;
#endif
/* List of dirty-page-containing chunks this arena manages. */
ql_head(arena_chunk_t) chunks_dirty;
@ -455,35 +447,23 @@ extern size_t sspace_max;
#define nlclasses (chunk_npages - map_bias)
void arena_purge_all(arena_t *arena);
#ifdef JEMALLOC_PROF
void arena_prof_accum(arena_t *arena, uint64_t accumbytes);
#endif
#ifdef JEMALLOC_TCACHE
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind
# ifdef JEMALLOC_PROF
, uint64_t prof_accumbytes
# endif
);
#endif
size_t binind, uint64_t prof_accumbytes);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_malloc(size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alloc_size,
size_t alignment, bool zero);
size_t arena_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
void arena_prof_promoted(const void *ptr, size_t size);
size_t arena_salloc_demote(const void *ptr);
#endif
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#ifdef JEMALLOC_STATS
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats);
#endif
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
@ -499,10 +479,8 @@ bool arena_boot(void);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
# ifdef JEMALLOC_PROF
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
# endif
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr);
#endif
@ -521,7 +499,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
unsigned shift, diff, regind;
size_t size;
dassert(run->magic == ARENA_RUN_MAGIC);
assert(run->magic == ARENA_RUN_MAGIC);
/*
* Freeing a pointer lower than region zero can cause assertion
* failure.
@ -586,7 +564,6 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
return (regind);
}
#ifdef JEMALLOC_PROF
JEMALLOC_INLINE prof_ctx_t *
arena_prof_ctx_get(const void *ptr)
{
@ -594,6 +571,7 @@ arena_prof_ctx_get(const void *ptr)
arena_chunk_t *chunk;
size_t pageind, mapbits;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
@ -612,7 +590,7 @@ arena_prof_ctx_get(const void *ptr)
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind;
dassert(run->magic == ARENA_RUN_MAGIC);
assert(run->magic == ARENA_RUN_MAGIC);
regind = arena_run_regind(run, bin_info, ptr);
ret = *(prof_ctx_t **)((uintptr_t)run +
bin_info->ctx0_offset + (regind *
@ -630,6 +608,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
arena_chunk_t *chunk;
size_t pageind, mapbits;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
@ -647,7 +626,7 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
arena_bin_info_t *bin_info;
unsigned regind;
dassert(run->magic == ARENA_RUN_MAGIC);
assert(run->magic == ARENA_RUN_MAGIC);
binind = arena_bin_index(chunk->arena, bin);
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);
@ -659,7 +638,6 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
} else
chunk->map[pageind-map_bias].prof_ctx = ctx;
}
#endif
JEMALLOC_INLINE void
arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@ -668,7 +646,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
arena_chunk_map_t *mapelm;
assert(arena != NULL);
dassert(arena->magic == ARENA_MAGIC);
assert(arena->magic == ARENA_MAGIC);
assert(chunk->arena == arena);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
@ -678,63 +656,57 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
/* Small allocation. */
#ifdef JEMALLOC_TCACHE
tcache_t *tcache;
if ((tcache = tcache_get()) != NULL)
if (config_tcache && (tcache = tcache_get()) != NULL)
tcache_dalloc_small(tcache, ptr);
else {
#endif
arena_run_t *run;
arena_bin_t *bin;
run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapelm->bits >>
PAGE_SHIFT)) << PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
assert(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
#ifdef JEMALLOC_DEBUG
{
if (config_debug) {
size_t binind = arena_bin_index(arena, bin);
arena_bin_info_t *bin_info =
UNUSED arena_bin_info_t *bin_info =
&arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset)) %
bin_info->reg_size == 0);
}
#endif
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, ptr, mapelm);
malloc_mutex_unlock(&bin->lock);
#ifdef JEMALLOC_TCACHE
}
#endif
} else {
#ifdef JEMALLOC_TCACHE
size_t size = mapelm->bits & ~PAGE_MASK;
if (config_tcache) {
size_t size = mapelm->bits & ~PAGE_MASK;
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
if (size <= tcache_maxclass) {
tcache_t *tcache;
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
if (size <= tcache_maxclass) {
tcache_t *tcache;
if ((tcache = tcache_get()) != NULL)
tcache_dalloc_large(tcache, ptr, size);
else {
if ((tcache = tcache_get()) != NULL)
tcache_dalloc_large(tcache, ptr, size);
else {
malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
}
} else {
malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
}
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
}
#else
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
malloc_mutex_lock(&arena->lock);
arena_dalloc_large(arena, chunk, ptr);
malloc_mutex_unlock(&arena->lock);
#endif
}
}
#endif
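The #ifndef JEMALLOC_PROF overlay in the chunk map hunk above deserves a
note: when profiling is compiled out, prof_ctx is superimposed on the
run-linkage union rather than dropped, so dead if (config_prof) code can
still reference the field without growing the map. A reduced sketch of
the layout the preprocessor yields (member names abbreviated for
illustration, not taken verbatim from the commit):

  #include <stddef.h>

  typedef struct prof_ctx_s prof_ctx_t;

  /* Cut-down stand-in for arena_chunk_map_s. */
  struct chunk_map_sketch {
  #ifndef JEMALLOC_PROF
      union {                /* Overlay: prof_ctx adds no size. */
  #endif
      union {
          void *run_link;    /* Run tree/list linkage. */
      } u;
      /* Profile counters, used for large object runs. */
      prof_ctx_t *prof_ctx;
  #ifndef JEMALLOC_PROF
      };                     /* union { ... }; */
  #endif
      size_t bits;           /* Page flags and size class. */
  };

With JEMALLOC_PROF defined, prof_ctx is an ordinary field; without it,
sizeof(struct chunk_map_sketch) stays at the pre-prof size, which
matters here because chunk map overhead scales with total memory rather
than being a fixed per-arena cost.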

include/jemalloc/internal/chunk.h

@ -28,20 +28,14 @@
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
#ifdef JEMALLOC_SWAP
extern bool opt_overcommit;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
/* Chunk statistics. */
extern chunk_stats_t stats_chunks;
#endif
#ifdef JEMALLOC_IVSALLOC
extern rtree_t *chunks_rtree;
#endif
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */

include/jemalloc/internal/chunk_dss.h

@ -1,4 +1,3 @@
#ifdef JEMALLOC_DSS
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@ -27,4 +26,3 @@ bool chunk_dss_boot(void);
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_DSS */

include/jemalloc/internal/chunk_swap.h

@ -1,4 +1,3 @@
#ifdef JEMALLOC_SWAP
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@ -15,9 +14,7 @@ extern bool swap_enabled;
extern bool swap_prezeroed;
extern size_t swap_nfds;
extern int *swap_fds;
#ifdef JEMALLOC_STATS
extern size_t swap_avail;
#endif
void *chunk_alloc_swap(size_t size, bool *zero);
bool chunk_in_swap(void *chunk);
@ -31,4 +28,3 @@ bool chunk_swap_boot(void);
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_SWAP */

include/jemalloc/internal/ckh.h

@ -30,10 +30,8 @@ struct ckhc_s {
};
struct ckh_s {
#ifdef JEMALLOC_DEBUG
#define CKH_MAGIC 0x3af2489d
uint32_t magic;
#endif
#ifdef CKH_COUNT
/* Counters used to get an idea of performance. */

include/jemalloc/internal/ctl.h

@ -32,7 +32,6 @@ struct ctl_arena_stats_s {
unsigned nthreads;
size_t pactive;
size_t pdirty;
#ifdef JEMALLOC_STATS
arena_stats_t astats;
/* Aggregate stats for small size classes, based on bin stats. */
@ -43,11 +42,9 @@ struct ctl_arena_stats_s {
malloc_bin_stats_t *bstats; /* nbins elements. */
malloc_large_stats_t *lstats; /* nlclasses elements. */
#endif
};
struct ctl_stats_s {
#ifdef JEMALLOC_STATS
size_t allocated;
size_t active;
size_t mapped;
@ -61,11 +58,8 @@ struct ctl_stats_s {
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
#endif
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
#ifdef JEMALLOC_SWAP
size_t swap_avail;
#endif
};
#endif /* JEMALLOC_H_STRUCTS */

include/jemalloc/internal/extent.h

@ -9,18 +9,14 @@ typedef struct extent_node_s extent_node_t;
/* Tree of extents. */
struct extent_node_s {
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) link_szad;
#endif
/* Linkage for the address-ordered tree. */
rb_node(extent_node_t) link_ad;
#ifdef JEMALLOC_PROF
/* Profile counters, used for huge objects. */
prof_ctx_t *prof_ctx;
#endif
/* Pointer to the extent that this tree node is responsible for. */
void *addr;
@ -34,9 +30,7 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t)
#endif
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

include/jemalloc/internal/huge.h

@ -9,12 +9,10 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_STATS
/* Huge allocation statistics. */
extern uint64_t huge_nmalloc;
extern uint64_t huge_ndalloc;
extern size_t huge_allocated;
#endif
/* Protects chunk-related data structures. */
extern malloc_mutex_t huge_mtx;
@ -27,10 +25,8 @@ void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
#ifdef JEMALLOC_PROF
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
#endif
bool huge_boot(void);
#endif /* JEMALLOC_H_EXTERNS */

include/jemalloc/internal/jemalloc_internal.h.in

@ -35,6 +35,125 @@
#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
#define UNUSED JEMALLOC_ATTR(unused)
#else
#define UNUSED
#endif
static const bool config_debug =
#ifdef JEMALLOC_DEBUG
true
#else
false
#endif
;
static const bool config_dss =
#ifdef JEMALLOC_DSS
true
#else
false
#endif
;
static const bool config_dynamic_page_shift =
#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
true
#else
false
#endif
;
static const bool config_fill =
#ifdef JEMALLOC_FILL
true
#else
false
#endif
;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
true
#else
false
#endif
;
static const bool config_prof =
#ifdef JEMALLOC_PROF
true
#else
false
#endif
;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
true
#else
false
#endif
;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
true
#else
false
#endif
;
static const bool config_stats =
#ifdef JEMALLOC_STATS
true
#else
false
#endif
;
static const bool config_swap =
#ifdef JEMALLOC_SWAP
true
#else
false
#endif
;
static const bool config_sysv =
#ifdef JEMALLOC_SYSV
true
#else
false
#endif
;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
true
#else
false
#endif
;
static const bool config_tiny =
#ifdef JEMALLOC_TINY
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
#else
false
#endif
;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
true
#else
false
#endif
;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
true
#else
false
#endif
;
#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif
@ -82,11 +201,11 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
# endif
#endif
#ifdef JEMALLOC_DEBUG
# define dassert(e) assert(e)
#else
# define dassert(e)
#endif
/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
if ((c) == false) \
assert(false); \
} while (0)
/*
* jemalloc can conceptually be broken into components (arena, tcache, etc.),
@ -265,30 +384,20 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#endif
#include "jemalloc/internal/prof.h"
#ifdef JEMALLOC_STATS
typedef struct {
uint64_t allocated;
uint64_t deallocated;
} thread_allocated_t;
#endif
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
#ifdef JEMALLOC_FILL
extern bool opt_junk;
#endif
#ifdef JEMALLOC_SYSV
extern bool opt_sysv;
#endif
#ifdef JEMALLOC_XMALLOC
extern bool opt_xmalloc;
#endif
#ifdef JEMALLOC_FILL
extern bool opt_zero;
#endif
extern size_t opt_narenas;
#ifdef DYNAMIC_PAGE_SHIFT
@ -327,8 +436,7 @@ extern __thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
extern arena_t **arenas;
extern unsigned narenas;
#ifdef JEMALLOC_STATS
# ifndef NO_TLS
#ifndef NO_TLS
extern __thread thread_allocated_t thread_allocated_tls;
# define ALLOCATED_GET() (thread_allocated_tls.allocated)
# define ALLOCATEDP_GET() (&thread_allocated_tls.allocated)
@ -338,10 +446,7 @@ extern __thread thread_allocated_t thread_allocated_tls;
thread_allocated_tls.allocated += a; \
thread_allocated_tls.deallocated += d; \
} while (0)
# else
extern pthread_key_t thread_allocated_tsd;
thread_allocated_t *thread_allocated_get_hard(void);
#else
# define ALLOCATED_GET() (thread_allocated_get()->allocated)
# define ALLOCATEDP_GET() (&thread_allocated_get()->allocated)
# define DEALLOCATED_GET() (thread_allocated_get()->deallocated)
@ -351,8 +456,9 @@ thread_allocated_t *thread_allocated_get_hard(void);
thread_allocated->allocated += (a); \
thread_allocated->deallocated += (d); \
} while (0)
# endif
#endif
extern pthread_key_t thread_allocated_tsd;
thread_allocated_t *thread_allocated_get_hard(void);
arena_t *arenas_extend(unsigned ind);
arena_t *choose_arena_hard(void);
@ -403,9 +509,7 @@ size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment, size_t *run_size_p);
void malloc_write(const char *s);
arena_t *choose_arena(void);
# if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *thread_allocated_get(void);
# endif
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@ -565,7 +669,6 @@ choose_arena(void)
return (ret);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
JEMALLOC_INLINE thread_allocated_t *
thread_allocated_get(void)
{
@ -577,7 +680,6 @@ thread_allocated_get(void)
return (thread_allocated);
}
#endif
#endif
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
@ -593,9 +695,7 @@ void *imalloc(size_t size);
void *icalloc(size_t size);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr);
# ifdef JEMALLOC_IVSALLOC
size_t ivsalloc(const void *ptr);
# endif
void idalloc(void *ptr);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
@ -674,20 +774,18 @@ isalloc(const void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
assert(chunk->arena->magic == ARENA_MAGIC);
#ifdef JEMALLOC_PROF
ret = arena_salloc_demote(ptr);
#else
ret = arena_salloc(ptr);
#endif
if (config_prof)
ret = arena_salloc_demote(ptr);
else
ret = arena_salloc(ptr);
} else
ret = huge_salloc(ptr);
return (ret);
}
#ifdef JEMALLOC_IVSALLOC
JEMALLOC_INLINE size_t
ivsalloc(const void *ptr)
{
@ -698,7 +796,6 @@ ivsalloc(const void *ptr)
return (isalloc(ptr));
}
#endif
JEMALLOC_INLINE void
idalloc(void *ptr)

include/jemalloc/internal/mutex.h

@ -3,14 +3,14 @@
#ifdef JEMALLOC_OSSPIN
typedef OSSpinLock malloc_mutex_t;
#define MALLOC_MUTEX_INITIALIZER 0
#else
typedef pthread_mutex_t malloc_mutex_t;
#endif
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
#else
# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
# ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
# define MALLOC_MUTEX_INITIALIZER PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
# else
# define MALLOC_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
# endif
#endif
#endif /* JEMALLOC_H_TYPES */

include/jemalloc/internal/prof.h

@ -1,4 +1,3 @@
#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
@ -297,6 +296,8 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
uint64_t r;
double u;
cassert(config_prof);
/*
* Compute sample threshold as a geometrically distributed random
* variable with mean (2^opt_lg_prof_sample).
@ -329,12 +330,13 @@ prof_ctx_get(const void *ptr)
prof_ctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
assert(chunk->arena->magic == ARENA_MAGIC);
ret = arena_prof_ctx_get(ptr);
} else
@ -348,12 +350,13 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
dassert(chunk->arena->magic == ARENA_MAGIC);
assert(chunk->arena->magic == ARENA_MAGIC);
arena_prof_ctx_set(ptr, ctx);
} else
@ -365,6 +368,7 @@ prof_sample_accum_update(size_t size)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
/* Sampling logic is unnecessary if the interval is 1. */
assert(opt_lg_prof_sample != 0);
@ -391,6 +395,7 @@ JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
{
cassert(config_prof);
assert(ptr != NULL);
assert(size == isalloc(ptr));
@ -437,6 +442,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
{
prof_thr_cnt_t *told_cnt;
cassert(config_prof);
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
@ -510,6 +516,8 @@ prof_free(const void *ptr, size_t size)
{
prof_ctx_t *ctx = prof_ctx_get(ptr);
cassert(config_prof);
if ((uintptr_t)ctx > (uintptr_t)1) {
assert(size == isalloc(ptr));
prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt);
@ -544,4 +552,3 @@ prof_free(const void *ptr, size_t size)
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
#endif /* JEMALLOC_PROF */

include/jemalloc/internal/stats.h

@ -3,23 +3,16 @@
#define UMAX2S_BUFSIZE 65
#ifdef JEMALLOC_STATS
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct arena_stats_s arena_stats_t;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
typedef struct chunk_stats_s chunk_stats_t;
#endif
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
#ifdef JEMALLOC_STATS
#ifdef JEMALLOC_TCACHE
struct tcache_bin_stats_s {
/*
* Number of allocation requests that corresponded to the size of this
@ -27,7 +20,6 @@ struct tcache_bin_stats_s {
*/
uint64_t nrequests;
};
#endif
struct malloc_bin_stats_s {
/*
@ -52,13 +44,11 @@ struct malloc_bin_stats_s {
*/
uint64_t nrequests;
#ifdef JEMALLOC_TCACHE
/* Number of tcache fills from this bin. */
uint64_t nfills;
/* Number of tcache flushes to this bin. */
uint64_t nflushes;
#endif
/* Total number of runs created for this bin's size class. */
uint64_t nruns;
@ -127,14 +117,10 @@ struct arena_stats_s {
*/
malloc_large_stats_t *lstats;
};
#endif /* JEMALLOC_STATS */
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
struct chunk_stats_s {
# ifdef JEMALLOC_STATS
/* Number of chunks that were allocated. */
uint64_t nchunks;
# endif
/* High-water mark for number of chunks allocated. */
size_t highchunks;
@ -146,7 +132,6 @@ struct chunk_stats_s {
*/
size_t curchunks;
};
#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
@ -154,24 +139,19 @@ struct chunk_stats_s {
extern bool opt_stats_print;
#ifdef JEMALLOC_STATS
extern size_t stats_cactive;
#endif
char *u2s(uint64_t x, unsigned base, char *s);
#ifdef JEMALLOC_STATS
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4));
void malloc_printf(const char *format, ...)
JEMALLOC_ATTR(format(printf, 1, 2));
#endif
void stats_print(void (*write)(void *, const char *), void *cbopaque,
const char *opts);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifdef JEMALLOC_STATS
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
@ -202,6 +182,5 @@ stats_cactive_sub(size_t size)
}
#endif
#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

include/jemalloc/internal/tcache.h

@ -42,9 +42,7 @@ struct tcache_bin_info_s {
};
struct tcache_bin_s {
# ifdef JEMALLOC_STATS
tcache_bin_stats_t tstats;
# endif
int low_water; /* Min # cached since last GC. */
unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */
unsigned ncached; /* # of cached objects. */
@ -52,12 +50,8 @@ struct tcache_bin_s {
};
struct tcache_s {
# ifdef JEMALLOC_STATS
ql_elm(tcache_t) link; /* Used for aggregating stats. */
# endif
# ifdef JEMALLOC_PROF
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
# endif
arena_t *arena; /* This thread's arena. */
unsigned ev_cnt; /* Event count since incremental GC. */
unsigned next_gc_bin; /* Next bin to GC. */
@ -109,23 +103,15 @@ extern size_t tcache_maxclass;
/* Number of tcache allocation/deallocation events between incremental GCs. */
extern unsigned tcache_gc_incr;
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache);
tcache_t *tcache_create(arena_t *arena);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_destroy(tcache_t *tcache);
#ifdef JEMALLOC_STATS
void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
#endif
bool tcache_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
@ -195,19 +181,11 @@ tcache_event(tcache_t *tcache)
if (binind < nbins) {
tcache_bin_flush_small(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
(tbin->low_water >> 2), tcache);
} else {
tcache_bin_flush_large(tbin, binind,
tbin->ncached - tbin->low_water +
(tbin->low_water >> 2)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
(tbin->low_water >> 2), tcache);
}
/*
* Reduce fill count by 2X. Limit lg_fill_div such that
@ -268,21 +246,19 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
assert(arena_salloc(ret) == arena_bin_info[binind].reg_size);
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
if (config_fill) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
} else
memset(ret, 0, size);
#ifdef JEMALLOC_STATS
tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
#endif
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += arena_bin_info[binind].reg_size;
tcache_event(tcache);
return (ret);
}
@ -309,28 +285,28 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
if (ret == NULL)
return (NULL);
} else {
#ifdef JEMALLOC_PROF
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
PAGE_SHIFT);
chunk->map[pageind-map_bias].bits &= ~CHUNK_MAP_CLASS_MASK;
#endif
if (config_prof) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
PAGE_SHIFT);
chunk->map[pageind-map_bias].bits &=
~CHUNK_MAP_CLASS_MASK;
}
if (zero == false) {
#ifdef JEMALLOC_FILL
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
#endif
if (config_fill) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
} else
memset(ret, 0, size);
#ifdef JEMALLOC_STATS
tbin->tstats.nrequests++;
#endif
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes += size;
#endif
if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += size;
}
tcache_event(tcache);
@ -357,26 +333,20 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
mapelm = &chunk->map[pageind-map_bias];
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
(mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
dassert(run->magic == ARENA_RUN_MAGIC);
assert(run->magic == ARENA_RUN_MAGIC);
bin = run->bin;
binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
sizeof(arena_bin_t);
assert(binind < nbins);
#ifdef JEMALLOC_FILL
if (opt_junk)
if (config_fill && opt_junk)
memset(ptr, 0x5a, arena_bin_info[binind].reg_size);
#endif
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;
@ -403,20 +373,14 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
binind = nbins + (size >> PAGE_SHIFT) - 1;
#ifdef JEMALLOC_FILL
if (opt_junk)
if (config_fill && opt_junk)
memset(ptr, 0x5a, size);
#endif
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (tbin->ncached == tbin_info->ncached_max) {
tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
1)
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
tbin->avail[tbin->ncached] = ptr;

include/jemalloc/jemalloc_defs.h.in

@ -48,9 +48,11 @@
/* Defined if __attribute__((...)) syntax is supported. */
#undef JEMALLOC_HAVE_ATTR
#ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_CATTR(s, a) __attribute__((s))
# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_CATTR(s, a) a
# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,)
#endif
/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */
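The reworked attribute macros in the hunk above split one knob into two:
JEMALLOC_CATTR(s, a) expands to __attribute__((s)) when attribute syntax
is supported and to the fallback code a otherwise, with JEMALLOC_ATTR(s)
now just the empty-fallback special case. A sketch of what the
two-argument form enables (this particular use is illustrative, not
taken from the commit):

  /* Hide a symbol via an attribute, or fall back to static linkage. */
  #define HIDDEN JEMALLOC_CATTR(visibility("hidden"), static)

  HIDDEN void helper(void);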

src/arena.c: file diff suppressed because it is too large.

src/chunk.c

@ -5,18 +5,12 @@
/* Data. */
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
#ifdef JEMALLOC_SWAP
bool opt_overcommit = true;
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
#endif
#ifdef JEMALLOC_IVSALLOC
rtree_t *chunks_rtree;
#endif
/* Various chunk-related settings. */
size_t chunksize;
@ -41,67 +35,50 @@ chunk_alloc(size_t size, bool base, bool *zero)
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_SWAP
if (swap_enabled) {
if (config_swap && swap_enabled) {
ret = chunk_alloc_swap(size, zero);
if (ret != NULL)
goto RETURN;
}
if (swap_enabled == false || opt_overcommit) {
#endif
#ifdef JEMALLOC_DSS
ret = chunk_alloc_dss(size, zero);
if (ret != NULL)
goto RETURN;
#endif
if (config_dss) {
ret = chunk_alloc_dss(size, zero);
if (ret != NULL)
goto RETURN;
}
ret = chunk_alloc_mmap(size);
if (ret != NULL) {
*zero = true;
goto RETURN;
}
#ifdef JEMALLOC_SWAP
}
#endif
/* All strategies for allocation failed. */
ret = NULL;
RETURN:
#ifdef JEMALLOC_IVSALLOC
if (base == false && ret != NULL) {
if (config_ivsalloc && base == false && ret != NULL) {
if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
chunk_dealloc(ret, size, true);
return (NULL);
}
}
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (ret != NULL) {
# ifdef JEMALLOC_PROF
if ((config_stats || config_prof) && ret != NULL) {
bool gdump;
# endif
malloc_mutex_lock(&chunks_mtx);
# ifdef JEMALLOC_STATS
stats_chunks.nchunks += (size / chunksize);
# endif
if (config_stats)
stats_chunks.nchunks += (size / chunksize);
stats_chunks.curchunks += (size / chunksize);
if (stats_chunks.curchunks > stats_chunks.highchunks) {
stats_chunks.highchunks = stats_chunks.curchunks;
# ifdef JEMALLOC_PROF
gdump = true;
# endif
}
# ifdef JEMALLOC_PROF
else
if (config_prof)
gdump = true;
} else if (config_prof)
gdump = false;
# endif
malloc_mutex_unlock(&chunks_mtx);
# ifdef JEMALLOC_PROF
if (opt_prof && opt_prof_gdump && gdump)
if (config_prof && opt_prof && opt_prof_gdump && gdump)
prof_gdump();
# endif
}
#endif
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
@ -116,24 +93,20 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert(size != 0);
assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_IVSALLOC
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
#endif
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_lock(&chunks_mtx);
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
#endif
if (config_ivsalloc)
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
}
if (unmap) {
#ifdef JEMALLOC_SWAP
if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
size) == false)
return;
#endif
#ifdef JEMALLOC_DSS
if (chunk_dealloc_dss(chunk, size) == false)
if (config_dss && chunk_dealloc_dss(chunk, size) == false)
return;
#endif
chunk_dealloc_mmap(chunk, size);
}
}
@ -148,26 +121,23 @@ chunk_boot(void)
chunksize_mask = chunksize - 1;
chunk_npages = (chunksize >> PAGE_SHIFT);
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
if (malloc_mutex_init(&chunks_mtx))
if (config_stats || config_prof) {
if (malloc_mutex_init(&chunks_mtx))
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
}
if (config_swap && chunk_swap_boot())
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif
#ifdef JEMALLOC_SWAP
if (chunk_swap_boot())
return (true);
#endif
if (chunk_mmap_boot())
return (true);
#ifdef JEMALLOC_DSS
if (chunk_dss_boot())
if (config_dss && chunk_dss_boot())
return (true);
#endif
#ifdef JEMALLOC_IVSALLOC
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
#endif
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk);
if (chunks_rtree == NULL)
return (true);
}
return (false);
}

src/chunk_dss.c

@ -1,6 +1,5 @@
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */
@ -35,6 +34,8 @@ chunk_recycle_dss(size_t size, bool *zero)
{
extent_node_t *node, key;
cassert(config_dss);
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&dss_mtx);
@ -74,6 +75,8 @@ chunk_alloc_dss(size_t size, bool *zero)
{
void *ret;
cassert(config_dss);
ret = chunk_recycle_dss(size, zero);
if (ret != NULL)
return (ret);
@ -131,6 +134,8 @@ chunk_dealloc_dss_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
cassert(config_dss);
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
@ -204,6 +209,8 @@ chunk_in_dss(void *chunk)
{
bool ret;
cassert(config_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
@ -220,6 +227,8 @@ chunk_dealloc_dss(void *chunk, size_t size)
{
bool ret;
cassert(config_dss);
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max) {
@ -269,6 +278,8 @@ bool
chunk_dss_boot(void)
{
cassert(config_dss);
if (malloc_mutex_init(&dss_mtx))
return (true);
dss_base = sbrk(0);
@ -281,4 +292,3 @@ chunk_dss_boot(void)
}
/******************************************************************************/
#endif /* JEMALLOC_DSS */

src/chunk_swap.c

@ -1,6 +1,6 @@
#define JEMALLOC_CHUNK_SWAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_SWAP
/******************************************************************************/
/* Data. */
@ -9,9 +9,7 @@ bool swap_enabled;
bool swap_prezeroed;
size_t swap_nfds;
int *swap_fds;
#ifdef JEMALLOC_STATS
size_t swap_avail;
#endif
/* Base address of the mmap()ed file(s). */
static void *swap_base;
@ -42,6 +40,8 @@ chunk_recycle_swap(size_t size, bool *zero)
{
extent_node_t *node, key;
cassert(config_swap);
key.addr = NULL;
key.size = size;
malloc_mutex_lock(&swap_mtx);
@ -65,9 +65,8 @@ chunk_recycle_swap(size_t size, bool *zero)
node->size -= size;
extent_tree_szad_insert(&swap_chunks_szad, node);
}
#ifdef JEMALLOC_STATS
swap_avail -= size;
#endif
if (config_stats)
swap_avail -= size;
malloc_mutex_unlock(&swap_mtx);
if (*zero)
@ -84,6 +83,7 @@ chunk_alloc_swap(size_t size, bool *zero)
{
void *ret;
cassert(config_swap);
assert(swap_enabled);
ret = chunk_recycle_swap(size, zero);
@ -94,9 +94,8 @@ chunk_alloc_swap(size_t size, bool *zero)
if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
ret = swap_end;
swap_end = (void *)((uintptr_t)swap_end + size);
#ifdef JEMALLOC_STATS
swap_avail -= size;
#endif
if (config_stats)
swap_avail -= size;
malloc_mutex_unlock(&swap_mtx);
if (swap_prezeroed)
@ -116,6 +115,8 @@ chunk_dealloc_swap_record(void *chunk, size_t size)
{
extent_node_t *xnode, *node, *prev, key;
cassert(config_swap);
xnode = NULL;
while (true) {
key.addr = (void *)((uintptr_t)chunk + size);
@ -189,6 +190,7 @@ chunk_in_swap(void *chunk)
{
bool ret;
cassert(config_swap);
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
@ -207,6 +209,7 @@ chunk_dealloc_swap(void *chunk, size_t size)
{
bool ret;
cassert(config_swap);
assert(swap_enabled);
malloc_mutex_lock(&swap_mtx);
@ -237,9 +240,8 @@ chunk_dealloc_swap(void *chunk, size_t size)
} else
madvise(chunk, size, MADV_DONTNEED);
#ifdef JEMALLOC_STATS
swap_avail += size;
#endif
if (config_stats)
swap_avail += size;
ret = false;
goto RETURN;
}
@ -260,6 +262,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
size_t cumsize, voff;
size_t sizes[nfds];
cassert(config_swap);
malloc_mutex_lock(&swap_mtx);
/* Get file sizes. */
@ -362,9 +366,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
memcpy(swap_fds, fds, nfds * sizeof(int));
swap_nfds = nfds;
#ifdef JEMALLOC_STATS
swap_avail = cumsize;
#endif
if (config_stats)
swap_avail = cumsize;
swap_enabled = true;
@ -378,6 +381,8 @@ bool
chunk_swap_boot(void)
{
cassert(config_swap);
if (malloc_mutex_init(&swap_mtx))
return (true);
@ -385,9 +390,8 @@ chunk_swap_boot(void)
swap_prezeroed = false; /* swap.* mallctl's depend on this. */
swap_nfds = 0;
swap_fds = NULL;
#ifdef JEMALLOC_STATS
swap_avail = 0;
#endif
if (config_stats)
swap_avail = 0;
swap_base = NULL;
swap_end = NULL;
swap_max = NULL;
@ -397,6 +401,3 @@ chunk_swap_boot(void)
return (false);
}
/******************************************************************************/
#endif /* JEMALLOC_SWAP */

src/ckh.c

@ -73,7 +73,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
size_t hash1, hash2, bucket, cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh->magic == CKH_MAGIC);
ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
@ -394,9 +394,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
goto RETURN;
}
#ifdef JEMALLOC_DEBUG
ckh->magic = CKH_MAGIC;
#endif
if (config_debug)
ckh->magic = CKH_MAGIC;
ret = false;
RETURN:
@ -408,7 +407,7 @@ ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh->magic == CKH_MAGIC);
#ifdef CKH_VERBOSE
malloc_printf(
@ -433,7 +432,7 @@ ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh->magic == CKH_MAGIC);
return (ckh->count);
}
@ -464,7 +463,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
bool ret;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh->magic == CKH_MAGIC);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
@ -489,7 +488,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@ -521,7 +520,7 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
dassert(ckh->magic == CKH_MAGIC);
assert(ckh->magic == CKH_MAGIC);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {

src/ctl.c (620 lines changed): file diff suppressed because it is too large.

src/extent.c

@ -3,7 +3,6 @@
/******************************************************************************/
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
#endif
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)

src/huge.c

@ -4,11 +4,9 @@
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
#endif
malloc_mutex_t huge_mtx;
@ -49,21 +47,19 @@ huge_malloc(size_t size, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
#endif
if (config_stats) {
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
}
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
else if (opt_zero)
memset(ret, 0, csize);
}
#endif
return (ret);
}
@ -134,21 +130,19 @@ huge_palloc(size_t size, size_t alignment, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
#endif
if (config_stats) {
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
}
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_FILL
if (zero == false) {
if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, chunk_size);
else if (opt_zero)
memset(ret, 0, chunk_size);
}
#endif
return (ret);
}
@ -164,12 +158,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
#ifdef JEMALLOC_FILL
if (opt_junk && size < oldsize) {
if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
#endif
return (ptr);
}
@ -223,15 +215,10 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* source nor the destination are in swap or dss.
*/
#ifdef JEMALLOC_MREMAP_FIXED
if (oldsize >= chunksize
# ifdef JEMALLOC_SWAP
&& (swap_enabled == false || (chunk_in_swap(ptr) == false &&
chunk_in_swap(ret) == false))
# endif
# ifdef JEMALLOC_DSS
&& chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
# endif
) {
if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
/*
@ -285,23 +272,16 @@ huge_dalloc(void *ptr, bool unmap)
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
#endif
if (config_stats) {
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
}
malloc_mutex_unlock(&huge_mtx);
if (unmap) {
/* Unmap chunk. */
#ifdef JEMALLOC_FILL
#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
if (opt_junk)
memset(node->addr, 0x5a, node->size);
#endif
#endif
}
if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
memset(node->addr, 0x5a, node->size);
chunk_dealloc(node->addr, node->size, unmap);
@ -328,7 +308,6 @@ huge_salloc(const void *ptr)
return (ret);
}
#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
@ -365,7 +344,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
malloc_mutex_unlock(&huge_mtx);
}
#endif
bool
huge_boot(void)
@ -376,11 +354,11 @@ huge_boot(void)
return (true);
extent_tree_ad_new(&huge);
#ifdef JEMALLOC_STATS
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
#endif
if (config_stats) {
huge_nmalloc = 0;
huge_ndalloc = 0;
huge_allocated = 0;
}
return (false);
}

src/jemalloc.c: file diff suppressed because it is too large.

src/prof.c

@ -1,6 +1,5 @@
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
#ifdef JEMALLOC_PROF
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
@ -102,6 +101,8 @@ void
bt_init(prof_bt_t *bt, void **vec)
{
cassert(config_prof);
bt->vec = vec;
bt->len = 0;
}
@ -110,6 +111,8 @@ static void
bt_destroy(prof_bt_t *bt)
{
cassert(config_prof);
idalloc(bt);
}
@ -118,6 +121,8 @@ bt_dup(prof_bt_t *bt)
{
prof_bt_t *ret;
cassert(config_prof);
/*
* Create a single allocation that has space for vec immediately
* following the prof_bt_t structure. The backtraces that get
@ -141,6 +146,8 @@ static inline void
prof_enter(void)
{
cassert(config_prof);
malloc_mutex_lock(&enq_mtx);
enq = true;
malloc_mutex_unlock(&enq_mtx);
@ -153,6 +160,8 @@ prof_leave(void)
{
bool idump, gdump;
cassert(config_prof);
malloc_mutex_unlock(&bt2ctx_mtx);
malloc_mutex_lock(&enq_mtx);
@ -178,6 +187,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
unsigned i;
int err;
cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
assert(max <= (1U << opt_lg_prof_bt_max));
@ -204,12 +214,13 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
break;
}
}
#endif
#ifdef JEMALLOC_PROF_LIBGCC
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{
cassert(config_prof);
return (_URC_NO_REASON);
}
@ -218,6 +229,8 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
cassert(config_prof);
if (data->nignore > 0)
data->nignore--;
else {
@ -235,10 +248,11 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
prof_unwind_data_t data = {bt, nignore, max};
cassert(config_prof);
_Unwind_Backtrace(prof_unwind_callback, &data);
}
#endif
#ifdef JEMALLOC_PROF_GCC
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
@ -257,6 +271,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
} else \
return;
cassert(config_prof);
assert(nignore <= 3);
assert(max <= (1U << opt_lg_prof_bt_max));
@ -407,6 +422,14 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
BT_FRAME(130)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
{
cassert(config_prof);
assert(false);
}
#endif
prof_thr_cnt_t *
@ -418,6 +441,8 @@ prof_lookup(prof_bt_t *bt)
} ret;
prof_tdata_t *prof_tdata;
cassert(config_prof);
prof_tdata = PROF_TCACHE_GET();
if (prof_tdata == NULL) {
prof_tdata = prof_tdata_init();
@ -553,6 +578,8 @@ prof_flush(bool propagate_err)
bool ret = false;
ssize_t err;
cassert(config_prof);
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
if (propagate_err == false) {
@ -573,6 +600,8 @@ prof_write(const char *s, bool propagate_err)
{
unsigned i, slen, n;
cassert(config_prof);
i = 0;
slen = strlen(s);
while (i < slen) {
@ -602,6 +631,8 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
cassert(config_prof);
malloc_mutex_lock(&ctx->lock);
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
@ -648,6 +679,8 @@ static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
cassert(config_prof);
/*
* Check that ctx is still unused by any thread cache before destroying
* it. prof_lookup() artificially raises ctx->cnt_merge.curobjs in
@ -686,6 +719,8 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
bool destroy;
cassert(config_prof);
/* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(&ctx->lock);
ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
@ -723,6 +758,8 @@ prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
char buf[UMAX2S_BUFSIZE];
unsigned i;
cassert(config_prof);
if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
@ -767,6 +804,8 @@ prof_dump_maps(bool propagate_err)
char mpath[6 + UMAX2S_BUFSIZE
+ 5 + 1];
cassert(config_prof);
i = 0;
s = "/proc/";
@ -827,6 +866,8 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
char buf[UMAX2S_BUFSIZE];
size_t leak_nctx;
cassert(config_prof);
prof_enter();
prof_dump_fd = creat(filename, 0644);
if (prof_dump_fd == -1) {
@ -917,6 +958,8 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
char *s;
unsigned i, slen;
cassert(config_prof);
/*
* Construct a filename of the form:
*
@ -979,6 +1022,8 @@ prof_fdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (prof_booted == false)
return;
@ -995,6 +1040,8 @@ prof_idump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (prof_booted == false)
return;
malloc_mutex_lock(&enq_mtx);
@ -1019,6 +1066,8 @@ prof_mdump(const char *filename)
{
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (opt_prof == false || prof_booted == false)
return (true);
@ -1040,6 +1089,8 @@ prof_gdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (prof_booted == false)
return;
malloc_mutex_lock(&enq_mtx);
@ -1066,6 +1117,7 @@ prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
assert(hash1 != NULL);
assert(hash2 != NULL);
@ -1094,6 +1146,8 @@ prof_bt_keycomp(const void *k1, const void *k2)
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
cassert(config_prof);
if (bt1->len != bt2->len)
return (false);
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
@ -1104,6 +1158,8 @@ prof_tdata_init(void)
{
prof_tdata_t *prof_tdata;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
if (prof_tdata == NULL)
@ -1138,6 +1194,8 @@ prof_tdata_cleanup(void *arg)
prof_thr_cnt_t *cnt;
prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
cassert(config_prof);
/*
* Delete the hash table. All of its contents can still be iterated
* over via the LRU.
@ -1161,6 +1219,8 @@ void
prof_boot0(void)
{
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
}
@ -1169,6 +1229,8 @@ void
prof_boot1(void)
{
cassert(config_prof);
/*
* opt_prof and prof_promote must be in their final state before any
* arenas are initialized, so this function must be executed early.
@ -1197,6 +1259,8 @@ bool
prof_boot2(void)
{
cassert(config_prof);
if (opt_prof) {
if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
@ -1241,4 +1305,3 @@ prof_boot2(void)
}
/******************************************************************************/
#endif /* JEMALLOC_PROF */

src/stats.c

@ -39,14 +39,11 @@
bool opt_stats_print = false;
#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#ifdef JEMALLOC_STATS
static void malloc_vcprintf(void (*write_cb)(void *, const char *),
void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
@ -55,10 +52,10 @@ static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
#endif
/******************************************************************************/
/* XXX Refactor by adding malloc_vsnprintf(). */
/*
* We don't want to depend on vsnprintf() for production builds, since that can
* cause unnecessary bloat for static binaries. u2s() provides minimal integer
@ -99,7 +96,6 @@ u2s(uint64_t x, unsigned base, char *s)
return (&s[i]);
}
#ifdef JEMALLOC_STATS
static void
malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap)
@ -149,9 +145,7 @@ malloc_printf(const char *format, ...)
malloc_vcprintf(NULL, NULL, format, ap);
va_end(ap);
}
#endif
#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
@ -377,7 +371,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
stats_arena_bins_print(write_cb, cbopaque, i);
stats_arena_lruns_print(write_cb, cbopaque, i);
}
#endif
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@ -674,8 +667,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
write_cb(cbopaque, ")\n");
}
#ifdef JEMALLOC_STATS
{
if (config_stats) {
int err;
size_t sszp, ssz;
size_t *cactive;
@ -785,6 +777,5 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
}
}
#endif /* #ifdef JEMALLOC_STATS */
write_cb(cbopaque, "--- End jemalloc statistics ---\n");
}

src/tcache.c

@ -38,31 +38,22 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
arena_tcache_fill_small(tcache->arena, tbin, binind
#ifdef JEMALLOC_PROF
, tcache->prof_accumbytes
#endif
);
#ifdef JEMALLOC_PROF
tcache->prof_accumbytes = 0;
#endif
arena_tcache_fill_small(tcache->arena, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
)
tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
#ifdef JEMALLOC_STATS
bool merged_stats = false;
#endif
assert(binind < nbins);
assert(rem <= tbin->ncached);
@ -74,25 +65,21 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
#ifdef JEMALLOC_PROF
if (arena == tcache->arena) {
if (config_prof && arena == tcache->arena) {
malloc_mutex_lock(&arena->lock);
arena_prof_accum(arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&arena->lock);
tcache->prof_accumbytes = 0;
}
#endif
malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
if (arena == tcache->arena) {
if (config_stats && arena == tcache->arena) {
assert(merged_stats == false);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
@ -117,8 +104,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
}
malloc_mutex_unlock(&bin->lock);
}
#ifdef JEMALLOC_STATS
if (merged_stats == false) {
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
@ -130,7 +116,6 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&bin->lock);
}
#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
@ -140,17 +125,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
}
void
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache_t *tcache
#endif
)
tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
tcache_t *tcache)
{
void *ptr;
unsigned i, nflush, ndeferred;
#ifdef JEMALLOC_STATS
bool merged_stats = false;
#endif
assert(binind < nhbins);
assert(rem <= tbin->ncached);
@ -162,23 +142,21 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
if (arena == tcache->arena) {
#endif
#ifdef JEMALLOC_PROF
arena_prof_accum(arena, tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
#endif
#ifdef JEMALLOC_STATS
merged_stats = true;
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
#endif
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
if ((config_prof || config_stats) && arena == tcache->arena) {
if (config_prof) {
arena_prof_accum(arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
arena->stats.nrequests_large +=
tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
#endif
ndeferred = 0;
for (i = 0; i < nflush; i++) {
ptr = tbin->avail[i];
@ -199,8 +177,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
}
malloc_mutex_unlock(&arena->lock);
}
#ifdef JEMALLOC_STATS
if (merged_stats == false) {
if (config_stats && merged_stats == false) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
@ -213,7 +190,6 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
tbin->tstats.nrequests = 0;
malloc_mutex_unlock(&arena->lock);
}
#endif
memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
rem * sizeof(void *));
@ -254,13 +230,13 @@ tcache_create(arena_t *arena)
if (tcache == NULL)
return (NULL);
#ifdef JEMALLOC_STATS
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
#endif
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(&arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&arena->lock);
}
tcache->arena = arena;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
@ -282,43 +258,32 @@ tcache_destroy(tcache_t *tcache)
unsigned i;
size_t tcache_size;
#ifdef JEMALLOC_STATS
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
#endif
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
}
for (i = 0; i < nbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_small(tbin, i, 0
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
tcache_bin_flush_small(tbin, i, 0, tcache);
#ifdef JEMALLOC_STATS
if (tbin->tstats.nrequests != 0) {
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
#endif
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
tcache_bin_flush_large(tbin, i, 0
#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
, tcache
#endif
);
tcache_bin_flush_large(tbin, i, 0, tcache);
#ifdef JEMALLOC_STATS
if (tbin->tstats.nrequests != 0) {
if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
@ -326,16 +291,13 @@ tcache_destroy(tcache_t *tcache)
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
#endif
}
#ifdef JEMALLOC_PROF
if (tcache->prof_accumbytes > 0) {
if (config_prof && tcache->prof_accumbytes > 0) {
malloc_mutex_lock(&tcache->arena->lock);
arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
malloc_mutex_unlock(&tcache->arena->lock);
}
#endif
tcache_size = arena_salloc(tcache);
if (tcache_size <= small_maxclass) {
@ -389,7 +351,6 @@ tcache_thread_cleanup(void *arg)
}
}
#ifdef JEMALLOC_STATS
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
@ -413,7 +374,6 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
tbin->tstats.nrequests = 0;
}
}
#endif
bool
tcache_boot(void)