Reduce cpp conditional logic complexity.

Convert configuration-related cpp conditional logic to use static
constant variables, e.g.:

    #ifdef JEMALLOC_DEBUG
    [...]
    #endif

becomes:

    if (config_debug) {
    	[...]
    }

The advantage is clearer, more concise code.  The main disadvantage is
that data structures no longer have conditionally defined fields, so
they pay the cost of all fields regardless of whether they are used.
In practice, this is only a minor concern; config_stats will go away in
an upcoming change, and config_prof is the only other major feature
that depends on more than a few special-purpose fields.
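For context, here is a minimal sketch of the mechanism (an assumed
header layout, not verbatim from this commit): each JEMALLOC_* feature
macro collapses into a compile-time boolean constant, so a disabled
`if (config_*)` branch is eliminated by constant folding much like an
#ifdef'ed-out block, yet the guarded code always parses and
type-checks.  The cassert() calls added throughout the diffs below
assert that functions belonging to a compiled-out feature are never
reached; the definition shown is a plausible one, not necessarily the
commit's exact macro.

    #include <assert.h>
    #include <stdbool.h>

    /* One constant per build option; the branch disappears at compile
     * time, but the code inside stays visible to the compiler. */
    #ifdef JEMALLOC_DEBUG
    static const bool config_debug = true;
    #else
    static const bool config_debug = false;
    #endif

    /* Compile-time-config assertion: fail (in debug builds) if code
     * for a disabled feature is ever reached at runtime. */
    #define cassert(c) do {						\
    	if ((c) == false)					\
    		assert(false);					\
    } while (0)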
src/arena.c (734 lines changed; file diff suppressed because it is too large)
src/chunk.c (104 lines changed)
@@ -5,18 +5,12 @@
 /* Data. */
 
 size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-#ifdef JEMALLOC_SWAP
 bool opt_overcommit = true;
-#endif
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
 malloc_mutex_t chunks_mtx;
 chunk_stats_t stats_chunks;
-#endif
 
-#ifdef JEMALLOC_IVSALLOC
 rtree_t *chunks_rtree;
-#endif
 
 /* Various chunk-related settings. */
 size_t chunksize;
@@ -41,67 +35,50 @@ chunk_alloc(size_t size, bool base, bool *zero)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-#ifdef JEMALLOC_SWAP
-	if (swap_enabled) {
+	if (config_swap && swap_enabled) {
 		ret = chunk_alloc_swap(size, zero);
 		if (ret != NULL)
 			goto RETURN;
 	}
 
-	if (swap_enabled == false || opt_overcommit) {
-#endif
-#ifdef JEMALLOC_DSS
-	ret = chunk_alloc_dss(size, zero);
-	if (ret != NULL)
-		goto RETURN;
-#endif
+	if (config_dss) {
+		ret = chunk_alloc_dss(size, zero);
+		if (ret != NULL)
+			goto RETURN;
+	}
 	ret = chunk_alloc_mmap(size);
 	if (ret != NULL) {
 		*zero = true;
 		goto RETURN;
 	}
-#ifdef JEMALLOC_SWAP
-	}
-#endif
 
 	/* All strategies for allocation failed. */
 	ret = NULL;
 RETURN:
-#ifdef JEMALLOC_IVSALLOC
-	if (base == false && ret != NULL) {
+	if (config_ivsalloc && base == false && ret != NULL) {
 		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
 			chunk_dealloc(ret, size, true);
 			return (NULL);
 		}
 	}
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	if (ret != NULL) {
-# ifdef JEMALLOC_PROF
+	if ((config_stats || config_prof) && ret != NULL) {
 		bool gdump;
-# endif
 		malloc_mutex_lock(&chunks_mtx);
-# ifdef JEMALLOC_STATS
-		stats_chunks.nchunks += (size / chunksize);
-# endif
+		if (config_stats)
+			stats_chunks.nchunks += (size / chunksize);
 		stats_chunks.curchunks += (size / chunksize);
 		if (stats_chunks.curchunks > stats_chunks.highchunks) {
 			stats_chunks.highchunks = stats_chunks.curchunks;
-# ifdef JEMALLOC_PROF
-			gdump = true;
-# endif
-		}
-# ifdef JEMALLOC_PROF
-		else
+			if (config_prof)
+				gdump = true;
+		} else if (config_prof)
 			gdump = false;
-# endif
 		malloc_mutex_unlock(&chunks_mtx);
-# ifdef JEMALLOC_PROF
-		if (opt_prof && opt_prof_gdump && gdump)
+		if (config_prof && opt_prof && opt_prof_gdump && gdump)
 			prof_gdump();
-# endif
 	}
-#endif
 
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
@@ -116,24 +93,20 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 
-#ifdef JEMALLOC_IVSALLOC
-	rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	malloc_mutex_lock(&chunks_mtx);
-	stats_chunks.curchunks -= (size / chunksize);
-	malloc_mutex_unlock(&chunks_mtx);
-#endif
+	if (config_ivsalloc)
+		rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+	if (config_stats || config_prof) {
+		malloc_mutex_lock(&chunks_mtx);
+		stats_chunks.curchunks -= (size / chunksize);
+		malloc_mutex_unlock(&chunks_mtx);
+	}
 
 	if (unmap) {
-#ifdef JEMALLOC_SWAP
-		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
+		if (config_swap && swap_enabled && chunk_dealloc_swap(chunk,
+		    size) == false)
 			return;
-#endif
-#ifdef JEMALLOC_DSS
-		if (chunk_dealloc_dss(chunk, size) == false)
+		if (config_dss && chunk_dealloc_dss(chunk, size) == false)
 			return;
-#endif
 		chunk_dealloc_mmap(chunk, size);
 	}
 }
@@ -148,26 +121,23 @@ chunk_boot(void)
 	chunksize_mask = chunksize - 1;
 	chunk_npages = (chunksize >> PAGE_SHIFT);
 
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-	if (malloc_mutex_init(&chunks_mtx))
+	if (config_stats || config_prof) {
+		if (malloc_mutex_init(&chunks_mtx))
+			return (true);
+		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+	}
+	if (config_swap && chunk_swap_boot())
 		return (true);
-	memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-#ifdef JEMALLOC_SWAP
-	if (chunk_swap_boot())
-		return (true);
-#endif
 	if (chunk_mmap_boot())
 		return (true);
-#ifdef JEMALLOC_DSS
-	if (chunk_dss_boot())
+	if (config_dss && chunk_dss_boot())
 		return (true);
-#endif
-#ifdef JEMALLOC_IVSALLOC
-	chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
-	if (chunks_rtree == NULL)
-		return (true);
-#endif
+	if (config_ivsalloc) {
+		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+		    opt_lg_chunk);
+		if (chunks_rtree == NULL)
+			return (true);
+	}
 
 	return (false);
 }
src/chunk_dss.c

@@ -1,6 +1,5 @@
 #define JEMALLOC_CHUNK_DSS_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_DSS
 /******************************************************************************/
 /* Data. */
 
@@ -35,6 +34,8 @@ chunk_recycle_dss(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
 
+	cassert(config_dss);
+
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&dss_mtx);
@@ -74,6 +75,8 @@ chunk_alloc_dss(size_t size, bool *zero)
 {
 	void *ret;
 
+	cassert(config_dss);
+
 	ret = chunk_recycle_dss(size, zero);
 	if (ret != NULL)
 		return (ret);
@@ -131,6 +134,8 @@ chunk_dealloc_dss_record(void *chunk, size_t size)
 {
 	extent_node_t *xnode, *node, *prev, key;
 
+	cassert(config_dss);
+
 	xnode = NULL;
 	while (true) {
 		key.addr = (void *)((uintptr_t)chunk + size);
@@ -204,6 +209,8 @@ chunk_in_dss(void *chunk)
 {
 	bool ret;
 
+	cassert(config_dss);
+
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max)
@@ -220,6 +227,8 @@ chunk_dealloc_dss(void *chunk, size_t size)
 {
 	bool ret;
 
+	cassert(config_dss);
+
 	malloc_mutex_lock(&dss_mtx);
 	if ((uintptr_t)chunk >= (uintptr_t)dss_base
 	    && (uintptr_t)chunk < (uintptr_t)dss_max) {
@@ -269,6 +278,8 @@ bool
 chunk_dss_boot(void)
 {
 
+	cassert(config_dss);
+
 	if (malloc_mutex_init(&dss_mtx))
 		return (true);
 	dss_base = sbrk(0);
@@ -281,4 +292,3 @@ chunk_dss_boot(void)
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_DSS */
src/chunk_swap.c

@@ -1,6 +1,6 @@
 #define JEMALLOC_CHUNK_SWAP_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SWAP
 
 /******************************************************************************/
 /* Data. */
 
@@ -9,9 +9,7 @@ bool swap_enabled;
 bool swap_prezeroed;
 size_t swap_nfds;
 int *swap_fds;
-#ifdef JEMALLOC_STATS
 size_t swap_avail;
-#endif
 
 /* Base address of the mmap()ed file(s). */
 static void *swap_base;
@@ -42,6 +40,8 @@ chunk_recycle_swap(size_t size, bool *zero)
 {
 	extent_node_t *node, key;
 
+	cassert(config_swap);
+
 	key.addr = NULL;
 	key.size = size;
 	malloc_mutex_lock(&swap_mtx);
@@ -65,9 +65,8 @@ chunk_recycle_swap(size_t size, bool *zero)
 		node->size -= size;
 		extent_tree_szad_insert(&swap_chunks_szad, node);
 	}
-#ifdef JEMALLOC_STATS
-	swap_avail -= size;
-#endif
+	if (config_stats)
+		swap_avail -= size;
 	malloc_mutex_unlock(&swap_mtx);
 
 	if (*zero)
@@ -84,6 +83,7 @@ chunk_alloc_swap(size_t size, bool *zero)
 {
 	void *ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	ret = chunk_recycle_swap(size, zero);
@@ -94,9 +94,8 @@ chunk_alloc_swap(size_t size, bool *zero)
 		if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
 			ret = swap_end;
 			swap_end = (void *)((uintptr_t)swap_end + size);
-#ifdef JEMALLOC_STATS
-			swap_avail -= size;
-#endif
+			if (config_stats)
+				swap_avail -= size;
 			malloc_mutex_unlock(&swap_mtx);
 
 			if (swap_prezeroed)
@@ -116,6 +115,8 @@ chunk_dealloc_swap_record(void *chunk, size_t size)
 {
 	extent_node_t *xnode, *node, *prev, key;
 
+	cassert(config_swap);
+
 	xnode = NULL;
 	while (true) {
 		key.addr = (void *)((uintptr_t)chunk + size);
@@ -189,6 +190,7 @@ chunk_in_swap(void *chunk)
 {
 	bool ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	malloc_mutex_lock(&swap_mtx);
@@ -207,6 +209,7 @@ chunk_dealloc_swap(void *chunk, size_t size)
 {
 	bool ret;
 
+	cassert(config_swap);
 	assert(swap_enabled);
 
 	malloc_mutex_lock(&swap_mtx);
@@ -237,9 +240,8 @@ chunk_dealloc_swap(void *chunk, size_t size)
 		} else
 			madvise(chunk, size, MADV_DONTNEED);
 
-#ifdef JEMALLOC_STATS
-		swap_avail += size;
-#endif
+		if (config_stats)
+			swap_avail += size;
 		ret = false;
 		goto RETURN;
 	}
@@ -260,6 +262,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	size_t cumsize, voff;
 	size_t sizes[nfds];
 
+	cassert(config_swap);
+
 	malloc_mutex_lock(&swap_mtx);
 
 	/* Get file sizes. */
@@ -362,9 +366,8 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	memcpy(swap_fds, fds, nfds * sizeof(int));
 	swap_nfds = nfds;
 
-#ifdef JEMALLOC_STATS
-	swap_avail = cumsize;
-#endif
+	if (config_stats)
+		swap_avail = cumsize;
 
 	swap_enabled = true;
 
@@ -378,6 +381,8 @@ bool
 chunk_swap_boot(void)
 {
 
+	cassert(config_swap);
+
 	if (malloc_mutex_init(&swap_mtx))
 		return (true);
 
@@ -385,9 +390,8 @@ chunk_swap_boot(void)
 	swap_prezeroed = false; /* swap.* mallctl's depend on this. */
 	swap_nfds = 0;
 	swap_fds = NULL;
-#ifdef JEMALLOC_STATS
-	swap_avail = 0;
-#endif
+	if (config_stats)
+		swap_avail = 0;
 	swap_base = NULL;
 	swap_end = NULL;
 	swap_max = NULL;
@@ -397,6 +401,3 @@ chunk_swap_boot(void)
 
 	return (false);
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_SWAP */
src/ckh.c (17 lines changed)
@@ -73,7 +73,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
 	size_t hash1, hash2, bucket, cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
 
@@ -394,9 +394,8 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 		goto RETURN;
 	}
 
-#ifdef JEMALLOC_DEBUG
-	ckh->magic = CKH_MAGIC;
-#endif
+	if (config_debug)
+		ckh->magic = CKH_MAGIC;
 
 	ret = false;
 RETURN:
@@ -408,7 +407,7 @@ ckh_delete(ckh_t *ckh)
 {
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 #ifdef CKH_VERBOSE
 	malloc_printf(
@@ -433,7 +432,7 @@ ckh_count(ckh_t *ckh)
 {
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	return (ckh->count);
 }
@@ -464,7 +463,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
 	bool ret;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 	assert(ckh_search(ckh, key, NULL, NULL));
 
 #ifdef CKH_COUNT
@@ -489,7 +488,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	size_t cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	cell = ckh_isearch(ckh, searchkey);
 	if (cell != SIZE_T_MAX) {
@@ -521,7 +520,7 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	size_t cell;
 
 	assert(ckh != NULL);
-	dassert(ckh->magic == CKH_MAGIC);
+	assert(ckh->magic == CKH_MAGIC);
 
 	cell = ckh_isearch(ckh, searchkey);
 	if (cell != SIZE_T_MAX) {
src/extent.c

@@ -3,7 +3,6 @@
 
 /******************************************************************************/
 
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
 static inline int
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
@@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
 /* Generate red-black tree functions. */
 rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
     extent_szad_comp)
-#endif
 
 static inline int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
src/huge.c (80 lines changed)
@@ -4,11 +4,9 @@
 /******************************************************************************/
 /* Data. */
 
-#ifdef JEMALLOC_STATS
 uint64_t huge_nmalloc;
 uint64_t huge_ndalloc;
 size_t huge_allocated;
-#endif
 
 malloc_mutex_t huge_mtx;
 
@@ -49,21 +47,19 @@ huge_malloc(size_t size, bool zero)
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-	stats_cactive_add(csize);
-	huge_nmalloc++;
-	huge_allocated += csize;
-#endif
+	if (config_stats) {
+		stats_cactive_add(csize);
+		huge_nmalloc++;
+		huge_allocated += csize;
+	}
 	malloc_mutex_unlock(&huge_mtx);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
 		else if (opt_zero)
 			memset(ret, 0, csize);
 	}
-#endif
 
 	return (ret);
 }
@@ -134,21 +130,19 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
-	stats_cactive_add(chunk_size);
-	huge_nmalloc++;
-	huge_allocated += chunk_size;
-#endif
+	if (config_stats) {
+		stats_cactive_add(chunk_size);
+		huge_nmalloc++;
+		huge_allocated += chunk_size;
+	}
 	malloc_mutex_unlock(&huge_mtx);
 
-#ifdef JEMALLOC_FILL
-	if (zero == false) {
+	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, chunk_size);
 		else if (opt_zero)
 			memset(ret, 0, chunk_size);
 	}
-#endif
 
 	return (ret);
 }
@@ -164,12 +158,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
 	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
 		assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
-		if (opt_junk && size < oldsize) {
+		if (config_fill && opt_junk && size < oldsize) {
 			memset((void *)((uintptr_t)ptr + size), 0x5a,
 			    oldsize - size);
 		}
-#endif
 		return (ptr);
 	}
 
@@ -223,15 +215,10 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * source nor the destination are in swap or dss.
 	 */
 #ifdef JEMALLOC_MREMAP_FIXED
-	if (oldsize >= chunksize
-# ifdef JEMALLOC_SWAP
-	    && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
-	    chunk_in_swap(ret) == false))
-# endif
-# ifdef JEMALLOC_DSS
-	    && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-# endif
-	    ) {
+	if (oldsize >= chunksize && (config_swap == false || swap_enabled ==
+	    false || (chunk_in_swap(ptr) == false && chunk_in_swap(ret) ==
+	    false)) && (config_dss == false || (chunk_in_dss(ptr) == false &&
+	    chunk_in_dss(ret) == false))) {
 		size_t newsize = huge_salloc(ret);
 
 		/*
@@ -285,23 +272,16 @@ huge_dalloc(void *ptr, bool unmap)
 	assert(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);
 
-#ifdef JEMALLOC_STATS
-	stats_cactive_sub(node->size);
-	huge_ndalloc++;
-	huge_allocated -= node->size;
-#endif
+	if (config_stats) {
+		stats_cactive_sub(node->size);
+		huge_ndalloc++;
+		huge_allocated -= node->size;
+	}
 
 	malloc_mutex_unlock(&huge_mtx);
 
-	if (unmap) {
-	    /* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
-		if (opt_junk)
-			memset(node->addr, 0x5a, node->size);
-#endif
-#endif
-	}
+	if (unmap && config_fill && (config_swap || config_dss) && opt_junk)
+		memset(node->addr, 0x5a, node->size);
 
 	chunk_dealloc(node->addr, node->size, unmap);
 
@@ -328,7 +308,6 @@ huge_salloc(const void *ptr)
 	return (ret);
 }
 
-#ifdef JEMALLOC_PROF
 prof_ctx_t *
 huge_prof_ctx_get(const void *ptr)
 {
@@ -365,7 +344,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 
 	malloc_mutex_unlock(&huge_mtx);
 }
-#endif
 
 bool
 huge_boot(void)
@@ -376,11 +354,11 @@ huge_boot(void)
 		return (true);
 	extent_tree_ad_new(&huge);
 
-#ifdef JEMALLOC_STATS
-	huge_nmalloc = 0;
-	huge_ndalloc = 0;
-	huge_allocated = 0;
-#endif
+	if (config_stats) {
+		huge_nmalloc = 0;
+		huge_ndalloc = 0;
+		huge_allocated = 0;
+	}
 
 	return (false);
 }
src/jemalloc.c (608 lines changed; file diff suppressed because it is too large)
src/prof.c (75 lines changed)
@@ -1,6 +1,5 @@
 #define JEMALLOC_PROF_C_
 #include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_PROF
 /******************************************************************************/
 
 #ifdef JEMALLOC_PROF_LIBUNWIND
@@ -102,6 +101,8 @@ void
 bt_init(prof_bt_t *bt, void **vec)
 {
 
+	cassert(config_prof);
+
 	bt->vec = vec;
 	bt->len = 0;
 }
@@ -110,6 +111,8 @@ static void
 bt_destroy(prof_bt_t *bt)
 {
 
+	cassert(config_prof);
+
 	idalloc(bt);
 }
 
@@ -118,6 +121,8 @@ bt_dup(prof_bt_t *bt)
 {
 	prof_bt_t *ret;
 
+	cassert(config_prof);
+
 	/*
	 * Create a single allocation that has space for vec immediately
	 * following the prof_bt_t structure.  The backtraces that get
@@ -141,6 +146,8 @@ static inline void
 prof_enter(void)
 {
 
+	cassert(config_prof);
+
 	malloc_mutex_lock(&enq_mtx);
 	enq = true;
 	malloc_mutex_unlock(&enq_mtx);
@@ -153,6 +160,8 @@ prof_leave(void)
 {
 	bool idump, gdump;
 
+	cassert(config_prof);
+
 	malloc_mutex_unlock(&bt2ctx_mtx);
 
 	malloc_mutex_lock(&enq_mtx);
@@ -178,6 +187,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	unsigned i;
 	int err;
 
+	cassert(config_prof);
 	assert(bt->len == 0);
 	assert(bt->vec != NULL);
 	assert(max <= (1U << opt_lg_prof_bt_max));
@@ -204,12 +214,13 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 			break;
 		}
 	}
-#endif
-#ifdef JEMALLOC_PROF_LIBGCC
+#elif (defined(JEMALLOC_PROF_LIBGCC))
 static _Unwind_Reason_Code
 prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
 {
 
+	cassert(config_prof);
+
 	return (_URC_NO_REASON);
 }
 
@@ -218,6 +229,8 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg)
 {
 	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
 
+	cassert(config_prof);
+
 	if (data->nignore > 0)
 		data->nignore--;
 	else {
@@ -235,10 +248,11 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 {
 	prof_unwind_data_t data = {bt, nignore, max};
 
+	cassert(config_prof);
+
 	_Unwind_Backtrace(prof_unwind_callback, &data);
 }
-#endif
-#ifdef JEMALLOC_PROF_GCC
+#elif (defined(JEMALLOC_PROF_GCC))
 void
 prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 {
@@ -257,6 +271,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	} else \
 		return;
 
+	cassert(config_prof);
 	assert(nignore <= 3);
 	assert(max <= (1U << opt_lg_prof_bt_max));
 
@@ -407,6 +422,14 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
 	BT_FRAME(130)
 #undef BT_FRAME
 }
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+{
+
+	cassert(config_prof);
+	assert(false);
+}
 #endif
 
 prof_thr_cnt_t *
@@ -418,6 +441,8 @@ prof_lookup(prof_bt_t *bt)
 	} ret;
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
+
 	prof_tdata = PROF_TCACHE_GET();
 	if (prof_tdata == NULL) {
 		prof_tdata = prof_tdata_init();
@@ -553,6 +578,8 @@ prof_flush(bool propagate_err)
 	bool ret = false;
 	ssize_t err;
 
+	cassert(config_prof);
+
 	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
 	if (err == -1) {
 		if (propagate_err == false) {
@@ -573,6 +600,8 @@ prof_write(const char *s, bool propagate_err)
 {
 	unsigned i, slen, n;
 
+	cassert(config_prof);
+
 	i = 0;
 	slen = strlen(s);
 	while (i < slen) {
@@ -602,6 +631,8 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
 	prof_thr_cnt_t *thr_cnt;
 	prof_cnt_t tcnt;
 
+	cassert(config_prof);
+
 	malloc_mutex_lock(&ctx->lock);
 
 	memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
@@ -648,6 +679,8 @@ static void
 prof_ctx_destroy(prof_ctx_t *ctx)
 {
 
+	cassert(config_prof);
+
 	/*
	 * Check that ctx is still unused by any thread cache before destroying
	 * it.  prof_lookup() artificially raises ctx->cnt_merge.curobjs in
@@ -686,6 +719,8 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
 {
 	bool destroy;
 
+	cassert(config_prof);
+
 	/* Merge cnt stats and detach from ctx. */
 	malloc_mutex_lock(&ctx->lock);
 	ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
@@ -723,6 +758,8 @@ prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
 	char buf[UMAX2S_BUFSIZE];
 	unsigned i;
 
+	cassert(config_prof);
+
 	if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
 		assert(ctx->cnt_summed.curbytes == 0);
 		assert(ctx->cnt_summed.accumobjs == 0);
@@ -767,6 +804,8 @@ prof_dump_maps(bool propagate_err)
 	char mpath[6 + UMAX2S_BUFSIZE
 	    + 5 + 1];
 
+	cassert(config_prof);
+
 	i = 0;
 
 	s = "/proc/";
@@ -827,6 +866,8 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
 	char buf[UMAX2S_BUFSIZE];
 	size_t leak_nctx;
 
+	cassert(config_prof);
+
 	prof_enter();
 	prof_dump_fd = creat(filename, 0644);
 	if (prof_dump_fd == -1) {
@@ -917,6 +958,8 @@ prof_dump_filename(char *filename, char v, int64_t vseq)
 	char *s;
 	unsigned i, slen;
 
+	cassert(config_prof);
+
 	/*
	 * Construct a filename of the form:
	 *
@@ -979,6 +1022,8 @@ prof_fdump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 
@@ -995,6 +1040,8 @@ prof_idump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 	malloc_mutex_lock(&enq_mtx);
@@ -1019,6 +1066,8 @@ prof_mdump(const char *filename)
 {
 	char filename_buf[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (opt_prof == false || prof_booted == false)
 		return (true);
 
@@ -1040,6 +1089,8 @@ prof_gdump(void)
 {
 	char filename[DUMP_FILENAME_BUFSIZE];
 
+	cassert(config_prof);
+
 	if (prof_booted == false)
 		return;
 	malloc_mutex_lock(&enq_mtx);
@@ -1066,6 +1117,7 @@ prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
 	uint64_t h;
 	prof_bt_t *bt = (prof_bt_t *)key;
 
+	cassert(config_prof);
 	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
 	assert(hash1 != NULL);
 	assert(hash2 != NULL);
@@ -1094,6 +1146,8 @@ prof_bt_keycomp(const void *k1, const void *k2)
 	const prof_bt_t *bt1 = (prof_bt_t *)k1;
 	const prof_bt_t *bt2 = (prof_bt_t *)k2;
 
+	cassert(config_prof);
+
 	if (bt1->len != bt2->len)
 		return (false);
 	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
@@ -1104,6 +1158,8 @@ prof_tdata_init(void)
 {
 	prof_tdata_t *prof_tdata;
 
+	cassert(config_prof);
+
 	/* Initialize an empty cache for this thread. */
 	prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
 	if (prof_tdata == NULL)
@@ -1138,6 +1194,8 @@ prof_tdata_cleanup(void *arg)
 	prof_thr_cnt_t *cnt;
 	prof_tdata_t *prof_tdata = (prof_tdata_t *)arg;
 
+	cassert(config_prof);
+
 	/*
	 * Delete the hash table.  All of its contents can still be iterated
	 * over via the LRU.
@@ -1161,6 +1219,8 @@ void
 prof_boot0(void)
 {
 
+	cassert(config_prof);
+
 	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
 	    sizeof(PROF_PREFIX_DEFAULT));
 }
@@ -1169,6 +1229,8 @@ void
 prof_boot1(void)
 {
 
+	cassert(config_prof);
+
 	/*
	 * opt_prof and prof_promote must be in their final state before any
	 * arenas are initialized, so this function must be executed early.
@@ -1197,6 +1259,8 @@ bool
 prof_boot2(void)
 {
 
+	cassert(config_prof);
+
 	if (opt_prof) {
 		if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
 		    prof_bt_keycomp))
@@ -1241,4 +1305,3 @@ prof_boot2(void)
 }
 
 /******************************************************************************/
-#endif /* JEMALLOC_PROF */
src/stats.c (13 lines changed)
@@ -39,14 +39,11 @@
 
 bool opt_stats_print = false;
 
-#ifdef JEMALLOC_STATS
 size_t stats_cactive = 0;
-#endif
 
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-#ifdef JEMALLOC_STATS
 static void	malloc_vcprintf(void (*write_cb)(void *, const char *),
     void *cbopaque, const char *format, va_list ap);
 static void	stats_arena_bins_print(void (*write_cb)(void *, const char *),
@@ -55,10 +52,10 @@ static void	stats_arena_lruns_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
 static void	stats_arena_print(void (*write_cb)(void *, const char *),
     void *cbopaque, unsigned i);
-#endif
 
 /******************************************************************************/
 
+/* XXX Refactor by adding malloc_vsnprintf(). */
 /*
 * We don't want to depend on vsnprintf() for production builds, since that can
 * cause unnecessary bloat for static binaries.  u2s() provides minimal integer
@@ -99,7 +96,6 @@ u2s(uint64_t x, unsigned base, char *s)
 	return (&s[i]);
 }
 
-#ifdef JEMALLOC_STATS
 static void
 malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
     const char *format, va_list ap)
@@ -149,9 +145,7 @@ malloc_printf(const char *format, ...)
 	malloc_vcprintf(NULL, NULL, format, ap);
 	va_end(ap);
 }
-#endif
 
-#ifdef JEMALLOC_STATS
 static void
 stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
     unsigned i)
@@ -377,7 +371,6 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	stats_arena_bins_print(write_cb, cbopaque, i);
 	stats_arena_lruns_print(write_cb, cbopaque, i);
 }
-#endif
 
 void
 stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@@ -674,8 +667,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			write_cb(cbopaque, ")\n");
 		}
 
-#ifdef JEMALLOC_STATS
-	{
+	if (config_stats) {
 		int err;
 		size_t sszp, ssz;
 		size_t *cactive;
@@ -785,6 +777,5 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			}
 		}
 	}
-#endif /* #ifdef JEMALLOC_STATS */
 	write_cb(cbopaque, "--- End jemalloc statistics ---\n");
 }
src/tcache.c (130 lines changed)
@@ -38,31 +38,22 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
 {
 	void *ret;
 
-	arena_tcache_fill_small(tcache->arena, tbin, binind
-#ifdef JEMALLOC_PROF
-	    , tcache->prof_accumbytes
-#endif
-	    );
-#ifdef JEMALLOC_PROF
-	tcache->prof_accumbytes = 0;
-#endif
+	arena_tcache_fill_small(tcache->arena, tbin, binind,
+	    config_prof ? tcache->prof_accumbytes : 0);
+	if (config_prof)
+		tcache->prof_accumbytes = 0;
 	ret = tcache_alloc_easy(tbin);
 
 	return (ret);
 }
 
 void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif
 
 	assert(binind < nbins);
 	assert(rem <= tbin->ncached);
@@ -74,25 +65,21 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_t *arena = chunk->arena;
 		arena_bin_t *bin = &arena->bins[binind];
 
-#ifdef JEMALLOC_PROF
-		if (arena == tcache->arena) {
+		if (config_prof && arena == tcache->arena) {
 			malloc_mutex_lock(&arena->lock);
 			arena_prof_accum(arena, tcache->prof_accumbytes);
 			malloc_mutex_unlock(&arena->lock);
 			tcache->prof_accumbytes = 0;
 		}
-#endif
 
 		malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
-		if (arena == tcache->arena) {
+		if (config_stats && arena == tcache->arena) {
 			assert(merged_stats == false);
 			merged_stats = true;
 			bin->stats.nflushes++;
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			tbin->tstats.nrequests = 0;
 		}
-#endif
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -117,8 +104,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		}
 		malloc_mutex_unlock(&bin->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -130,7 +116,6 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&bin->lock);
 	}
-#endif
 
 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -140,17 +125,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 }
 
 void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-    , tcache_t *tcache
-#endif
-    )
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+    tcache_t *tcache)
 {
 	void *ptr;
 	unsigned i, nflush, ndeferred;
-#ifdef JEMALLOC_STATS
 	bool merged_stats = false;
-#endif
 
 	assert(binind < nhbins);
 	assert(rem <= tbin->ncached);
@@ -162,23 +142,21 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_t *arena = chunk->arena;
 
 		malloc_mutex_lock(&arena->lock);
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		if (arena == tcache->arena) {
-#endif
-#ifdef JEMALLOC_PROF
-			arena_prof_accum(arena, tcache->prof_accumbytes);
-			tcache->prof_accumbytes = 0;
-#endif
-#ifdef JEMALLOC_STATS
-			merged_stats = true;
-			arena->stats.nrequests_large += tbin->tstats.nrequests;
-			arena->stats.lstats[binind - nbins].nrequests +=
-			    tbin->tstats.nrequests;
-			tbin->tstats.nrequests = 0;
-#endif
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
-		}
-#endif
+		if ((config_prof || config_stats) && arena == tcache->arena) {
+			if (config_prof) {
+				arena_prof_accum(arena,
+				    tcache->prof_accumbytes);
+				tcache->prof_accumbytes = 0;
+			}
+			if (config_stats) {
+				merged_stats = true;
+				arena->stats.nrequests_large +=
+				    tbin->tstats.nrequests;
+				arena->stats.lstats[binind - nbins].nrequests +=
+				    tbin->tstats.nrequests;
+				tbin->tstats.nrequests = 0;
+			}
+		}
 		ndeferred = 0;
 		for (i = 0; i < nflush; i++) {
 			ptr = tbin->avail[i];
@@ -199,8 +177,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		}
 		malloc_mutex_unlock(&arena->lock);
 	}
-#ifdef JEMALLOC_STATS
-	if (merged_stats == false) {
+	if (config_stats && merged_stats == false) {
 		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
@@ -213,7 +190,6 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		tbin->tstats.nrequests = 0;
 		malloc_mutex_unlock(&arena->lock);
 	}
-#endif
 
 	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
 	    rem * sizeof(void *));
@@ -254,13 +230,13 @@ tcache_create(arena_t *arena)
 	if (tcache == NULL)
 		return (NULL);
 
-#ifdef JEMALLOC_STATS
-	/* Link into list of extant tcaches. */
-	malloc_mutex_lock(&arena->lock);
-	ql_elm_new(tcache, link);
-	ql_tail_insert(&arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&arena->lock);
-#endif
+	if (config_stats) {
+		/* Link into list of extant tcaches. */
+		malloc_mutex_lock(&arena->lock);
+		ql_elm_new(tcache, link);
+		ql_tail_insert(&arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&arena->lock);
+	}
 
 	tcache->arena = arena;
 	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
@@ -282,43 +258,32 @@ tcache_destroy(tcache_t *tcache)
 	unsigned i;
 	size_t tcache_size;
 
-#ifdef JEMALLOC_STATS
-	/* Unlink from list of extant tcaches. */
-	malloc_mutex_lock(&tcache->arena->lock);
-	ql_remove(&tcache->arena->tcache_ql, tcache, link);
-	malloc_mutex_unlock(&tcache->arena->lock);
-	tcache_stats_merge(tcache, tcache->arena);
-#endif
+	if (config_stats) {
+		/* Unlink from list of extant tcaches. */
+		malloc_mutex_lock(&tcache->arena->lock);
+		ql_remove(&tcache->arena->tcache_ql, tcache, link);
+		malloc_mutex_unlock(&tcache->arena->lock);
+		tcache_stats_merge(tcache, tcache->arena);
+	}
 
 	for (i = 0; i < nbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_small(tbin, i, 0, tcache);
 
-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			arena_bin_t *bin = &arena->bins[i];
 			malloc_mutex_lock(&bin->lock);
 			bin->stats.nrequests += tbin->tstats.nrequests;
 			malloc_mutex_unlock(&bin->lock);
 		}
-#endif
 	}
 
 	for (; i < nhbins; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_large(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
-		    , tcache
-#endif
-		    );
+		tcache_bin_flush_large(tbin, i, 0, tcache);
 
-#ifdef JEMALLOC_STATS
-		if (tbin->tstats.nrequests != 0) {
+		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_t *arena = tcache->arena;
 			malloc_mutex_lock(&arena->lock);
 			arena->stats.nrequests_large += tbin->tstats.nrequests;
@@ -326,16 +291,13 @@ tcache_destroy(tcache_t *tcache)
 			    tbin->tstats.nrequests;
 			malloc_mutex_unlock(&arena->lock);
 		}
-#endif
 	}
 
-#ifdef JEMALLOC_PROF
-	if (tcache->prof_accumbytes > 0) {
+	if (config_prof && tcache->prof_accumbytes > 0) {
 		malloc_mutex_lock(&tcache->arena->lock);
 		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
 		malloc_mutex_unlock(&tcache->arena->lock);
 	}
-#endif
 
 	tcache_size = arena_salloc(tcache);
 	if (tcache_size <= small_maxclass) {
@@ -389,7 +351,6 @@ tcache_thread_cleanup(void *arg)
 	}
 }
 
-#ifdef JEMALLOC_STATS
 void
 tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 {
@@ -413,7 +374,6 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
 		tbin->tstats.nrequests = 0;
 	}
 }
-#endif
 
 bool
 tcache_boot(void)