Implement explicit tcache support.
Add the MALLOCX_TCACHE() and MALLOCX_TCACHE_NONE macros, which can be used
in conjunction with the *allocx() API.

Add the tcache.create, tcache.flush, and tcache.destroy mallctls.

This resolves #145.
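A minimal usage sketch of the interface this commit introduces (the request
sizes and error handling are illustrative assumptions, not part of the
commit):

#include <jemalloc/jemalloc.h>

int
main(void)
{
    unsigned tc;
    size_t sz = sizeof(tc);

    /* Create an explicit tcache; its index comes back through oldp. */
    if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
        return (1);

    /* Route an allocation and its deallocation through that tcache. */
    void *p = mallocx(128, MALLOCX_TCACHE(tc));
    if (p == NULL)
        return (1);
    dallocx(p, MALLOCX_TCACHE(tc));

    /* Or bypass thread caching entirely for a one-off allocation. */
    void *q = mallocx(128, MALLOCX_TCACHE_NONE);
    dallocx(q, MALLOCX_TCACHE_NONE);

    /* Flush the cache's contents, then release its index for reuse. */
    mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
    mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
    return (0);
}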
src/arena.c | 24 lines changed

@@ -2182,8 +2182,7 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 
 void *
 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc)
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
 {
     void *ret;
     size_t copysize;
@@ -2201,12 +2200,9 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
         size_t usize = sa2u(size + extra, alignment);
         if (usize == 0)
             return (NULL);
-        ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
-            arena);
-    } else {
-        ret = arena_malloc(tsd, arena, size + extra, zero,
-            try_tcache_alloc);
-    }
+        ret = ipalloct(tsd, usize, alignment, zero, tcache, arena);
+    } else
+        ret = arena_malloc(tsd, arena, size + extra, zero, tcache);
 
     if (ret == NULL) {
         if (extra == 0)
@@ -2216,12 +2212,10 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
             size_t usize = sa2u(size, alignment);
             if (usize == 0)
                 return (NULL);
-            ret = ipalloct(tsd, usize, alignment, zero,
-                try_tcache_alloc, arena);
-        } else {
-            ret = arena_malloc(tsd, arena, size, zero,
-                try_tcache_alloc);
-        }
+            ret = ipalloct(tsd, usize, alignment, zero, tcache,
+                arena);
+        } else
+            ret = arena_malloc(tsd, arena, size, zero, tcache);
 
         if (ret == NULL)
             return (NULL);
@@ -2236,7 +2230,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     copysize = (size < oldsize) ? size : oldsize;
     JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
     memcpy(ret, ptr, copysize);
-    isqalloc(tsd, ptr, oldsize, try_tcache_dalloc);
+    isqalloc(tsd, ptr, oldsize, tcache);
     return (ret);
 }
 
src/ckh.c

@@ -270,7 +270,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
             ret = true;
             goto label_return;
         }
-        tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
+        tab = (ckhc_t *)ipalloct(tsd, usize, CACHELINE, true, NULL,
+            NULL);
         if (tab == NULL) {
             ret = true;
             goto label_return;
@@ -313,7 +314,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
     usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
     if (usize == 0)
         return;
-    tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
+    tab = (ckhc_t *)ipalloct(tsd, usize, CACHELINE, true, NULL, NULL);
     if (tab == NULL) {
         /*
          * An OOM error isn't worth propagating, since it doesn't
@@ -389,7 +390,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
         ret = true;
         goto label_return;
     }
-    ckh->tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
+    ckh->tab = (ckhc_t *)ipalloct(tsd, usize, CACHELINE, true, NULL, NULL);
     if (ckh->tab == NULL) {
         ret = true;
         goto label_return;
src/ctl.c | 113 lines changed

@@ -110,6 +110,9 @@ CTL_PROTO(opt_prof_gdump)
 CTL_PROTO(opt_prof_final)
 CTL_PROTO(opt_prof_leak)
 CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
 CTL_PROTO(arena_i_purge)
 static void arena_purge(unsigned arena_ind);
 CTL_PROTO(arena_i_dss)
@@ -275,6 +278,12 @@ static const ctl_named_node_t opt_node[] = {
     {NAME("prof_accum"), CTL(opt_prof_accum)}
 };
 
+static const ctl_named_node_t tcache_node[] = {
+    {NAME("create"), CTL(tcache_create)},
+    {NAME("flush"), CTL(tcache_flush)},
+    {NAME("destroy"), CTL(tcache_destroy)}
+};
+
 static const ctl_named_node_t chunk_node[] = {
     {NAME("alloc"), CTL(arena_i_chunk_alloc)},
     {NAME("dalloc"), CTL(arena_i_chunk_dalloc)}
@@ -474,6 +483,7 @@ static const ctl_named_node_t root_node[] = {
     {NAME("thread"), CHILD(named, thread)},
     {NAME("config"), CHILD(named, config)},
     {NAME("opt"), CHILD(named, opt)},
+    {NAME("tcache"), CHILD(named, tcache)},
     {NAME("arena"), CHILD(indexed, arena)},
     {NAME("arenas"), CHILD(named, arenas)},
     {NAME("prof"), CHILD(named, prof)},
@@ -1281,19 +1291,21 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 {
     int ret;
     tsd_t *tsd;
-    arena_t *arena;
+    arena_t *oldarena;
     unsigned newind, oldind;
 
     tsd = tsd_fetch();
-    arena = arena_choose(tsd, NULL);
-    if (arena == NULL)
+    oldarena = arena_choose(tsd, NULL);
+    if (oldarena == NULL)
         return (EAGAIN);
 
     malloc_mutex_lock(&ctl_mtx);
-    newind = oldind = arena->ind;
+    newind = oldind = oldarena->ind;
     WRITE(newind, unsigned);
     READ(oldind, unsigned);
     if (newind != oldind) {
+        arena_t *newarena;
+
         if (newind >= ctl_stats.narenas) {
             /* New arena index is out of range. */
             ret = EFAULT;
@@ -1301,8 +1313,8 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
         }
 
         /* Initialize arena if necessary. */
-        arena = arena_get(tsd, newind, true, true);
-        if (arena == NULL) {
+        newarena = arena_get(tsd, newind, true, true);
+        if (newarena == NULL) {
             ret = EAGAIN;
             goto label_return;
         }
@@ -1310,8 +1322,10 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
         arena_migrate(tsd, oldind, newind);
         if (config_tcache) {
             tcache_t *tcache = tsd_tcache_get(tsd);
-            if (tcache != NULL)
-                tcache_arena_reassociate(tcache, arena);
+            if (tcache != NULL) {
+                tcache_arena_reassociate(tcache, oldarena,
+                    newarena);
+            }
         }
     }
 
@@ -1438,6 +1452,89 @@ label_return:
 
 /******************************************************************************/
 
+static int
+tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+    int ret;
+    tsd_t *tsd;
+    unsigned tcache_ind;
+
+    if (!config_tcache)
+        return (ENOENT);
+
+    tsd = tsd_fetch();
+
+    malloc_mutex_lock(&ctl_mtx);
+    READONLY();
+    if (tcaches_create(tsd, &tcache_ind)) {
+        ret = EFAULT;
+        goto label_return;
+    }
+    READ(tcache_ind, unsigned);
+
+    ret = 0;
+label_return:
+    malloc_mutex_unlock(&ctl_mtx);
+    return (ret);
+}
+
+static int
+tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+    int ret;
+    tsd_t *tsd;
+    unsigned tcache_ind;
+
+    if (!config_tcache)
+        return (ENOENT);
+
+    tsd = tsd_fetch();
+
+    WRITEONLY();
+    tcache_ind = UINT_MAX;
+    WRITE(tcache_ind, unsigned);
+    if (tcache_ind == UINT_MAX) {
+        ret = EFAULT;
+        goto label_return;
+    }
+    tcaches_flush(tsd, tcache_ind);
+
+    ret = 0;
+label_return:
+    return (ret);
+}
+
+static int
+tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+    int ret;
+    tsd_t *tsd;
+    unsigned tcache_ind;
+
+    if (!config_tcache)
+        return (ENOENT);
+
+    tsd = tsd_fetch();
+
+    WRITEONLY();
+    tcache_ind = UINT_MAX;
+    WRITE(tcache_ind, unsigned);
+    if (tcache_ind == UINT_MAX) {
+        ret = EFAULT;
+        goto label_return;
+    }
+    tcaches_destroy(tsd, tcache_ind);
+
+    ret = 0;
+label_return:
+    return (ret);
+}
+
+/******************************************************************************/
+
 /* ctl_mutex must be held during execution of this function. */
 static void
 arena_purge(unsigned arena_ind)
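The three handlers are deliberately asymmetric: tcache.create is read-only
and returns the new index through oldp, while tcache.flush and
tcache.destroy are write-only and take the index through newp (EFAULT when
no index is supplied). A sketch of driving them through the MIB interface
when caches are created and destroyed frequently (the loop bound here is an
arbitrary assumption):

size_t mib_create[2], mib_destroy[2], miblen = 2;
mallctlnametomib("tcache.create", mib_create, &miblen);
miblen = 2;
mallctlnametomib("tcache.destroy", mib_destroy, &miblen);

for (int i = 0; i < 8; i++) {
    unsigned tc;
    size_t sz = sizeof(tc);
    if (mallctlbymib(mib_create, 2, &tc, &sz, NULL, 0) != 0)
        break;
    /* ... allocate with MALLOCX_TCACHE(tc) ... */
    mallctlbymib(mib_destroy, 2, NULL, NULL, &tc, sizeof(tc));
}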
src/huge.c | 36 lines changed

@@ -13,7 +13,8 @@ static malloc_mutex_t huge_mtx;
 static extent_tree_t huge;
 
 void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, bool try_tcache)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+    tcache_t *tcache)
 {
     size_t usize;
 
@@ -23,12 +24,12 @@ huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero, bool try_tcache)
         return (NULL);
     }
 
-    return (huge_palloc(tsd, arena, usize, chunksize, zero, try_tcache));
+    return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
 }
 
 void *
 huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
-    bool zero, bool try_tcache)
+    bool zero, tcache_t *tcache)
 {
     void *ret;
     extent_node_t *node;
@@ -38,7 +39,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 
     /* Allocate an extent node with which to track the chunk. */
     node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
-        CACHELINE, false, try_tcache, true, arena);
+        CACHELINE, false, tcache, true, arena);
     if (node == NULL)
         return (NULL);
 
@@ -50,7 +51,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     arena = arena_choose(tsd, arena);
     if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
         usize, alignment, &is_zeroed)) == NULL) {
-        idalloctm(tsd, node, try_tcache, true);
+        idalloctm(tsd, node, tcache, true);
         return (NULL);
     }
 
@@ -307,8 +308,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 
 void *
 huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc)
+    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
 {
     void *ret;
     size_t copysize;
@@ -324,11 +324,9 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
      */
     if (alignment > chunksize) {
         ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
-            try_tcache_alloc);
-    } else {
-        ret = huge_malloc(tsd, arena, size + extra, zero,
-            try_tcache_alloc);
-    }
+            tcache);
+    } else
+        ret = huge_malloc(tsd, arena, size + extra, zero, tcache);
 
     if (ret == NULL) {
         if (extra == 0)
@@ -336,11 +334,9 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
         /* Try again, this time without extra. */
         if (alignment > chunksize) {
             ret = huge_palloc(tsd, arena, size, alignment, zero,
-                try_tcache_alloc);
-        } else {
-            ret = huge_malloc(tsd, arena, size, zero,
-                try_tcache_alloc);
-        }
+                tcache);
+        } else
+            ret = huge_malloc(tsd, arena, size, zero, tcache);
 
         if (ret == NULL)
             return (NULL);
@@ -352,12 +348,12 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
      */
     copysize = (size < oldsize) ? size : oldsize;
     memcpy(ret, ptr, copysize);
-    isqalloc(tsd, ptr, oldsize, try_tcache_dalloc);
+    isqalloc(tsd, ptr, oldsize, tcache);
     return (ret);
 }
 
 void
-huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache)
+huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
     extent_node_t *node;
 
@@ -368,7 +364,7 @@ huge_dalloc(tsd_t *tsd, void *ptr, bool try_tcache)
 
     huge_dalloc_junk(node->addr, node->size);
     arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
-    idalloctm(tsd, node, try_tcache, true);
+    idalloctm(tsd, node, tcache, true);
 }
 
 arena_t *
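Since huge_malloc/huge_palloc/huge_ralloc now take a tcache_t * that is also
threaded through to the extent-node metadata allocation, the *allocx()
tcache flags apply uniformly to huge requests as well. A sketch (the sizes
are assumptions chosen to exceed tcache_maxclass):

/* Route a huge allocation and its resize around any thread cache. */
void *p = mallocx((size_t)8 << 20, MALLOCX_TCACHE_NONE);
if (p != NULL)
    p = rallocx(p, (size_t)16 << 20, MALLOCX_TCACHE_NONE);
if (p != NULL)
    dallocx(p, MALLOCX_TCACHE_NONE);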
src/jemalloc.c | 158 lines changed

@@ -367,6 +367,8 @@ arena_init_locked(unsigned ind)
 
     /* Expand arenas if necessary. */
     assert(ind <= narenas_total);
+    if (ind > MALLOCX_ARENA_MAX)
+        return (NULL);
     if (ind == narenas_total) {
         unsigned narenas_new = narenas_total + 1;
         arena_t **arenas_new =
@@ -1696,7 +1698,7 @@ irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
 }
 
 JEMALLOC_INLINE_C void
-ifree(tsd_t *tsd, void *ptr, bool try_tcache)
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
     size_t usize;
     UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
@@ -1713,12 +1715,12 @@ ifree(tsd_t *tsd, void *ptr, bool try_tcache)
     *tsd_thread_deallocatedp_get(tsd) += usize;
     if (config_valgrind && unlikely(in_valgrind))
         rzsize = p2rz(ptr);
-    iqalloc(tsd, ptr, try_tcache);
+    iqalloc(tsd, ptr, tcache);
     JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }
 
 JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
 {
     UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
@@ -1731,7 +1733,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
     *tsd_thread_deallocatedp_get(tsd) += usize;
     if (config_valgrind && unlikely(in_valgrind))
         rzsize = p2rz(ptr);
-    isqalloc(tsd, ptr, usize, try_tcache);
+    isqalloc(tsd, ptr, usize, tcache);
     JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }
 
@@ -1749,7 +1751,7 @@ je_realloc(void *ptr, size_t size)
         /* realloc(ptr, 0) is equivalent to free(ptr). */
         UTRACE(ptr, 0, 0);
         tsd = tsd_fetch();
-        ifree(tsd, ptr, true);
+        ifree(tsd, ptr, tcache_get(tsd, false));
         return (NULL);
     }
     size = 1;
@@ -1802,8 +1804,10 @@ je_free(void *ptr)
 {
 
     UTRACE(ptr, 0, 0);
-    if (likely(ptr != NULL))
-        ifree(tsd_fetch(), ptr, true);
+    if (likely(ptr != NULL)) {
+        tsd_t *tsd = tsd_fetch();
+        ifree(tsd, ptr, tcache_get(tsd, false));
+    }
 }
 
 /*
@@ -1875,7 +1879,7 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
 
 JEMALLOC_ALWAYS_INLINE_C bool
 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
-    size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
+    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
 {
 
     if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
@@ -1886,22 +1890,26 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
         *usize = sa2u(size, *alignment);
     }
     *zero = MALLOCX_ZERO_GET(flags);
+    if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+            *tcache = NULL;
+        else
+            *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+    } else
+        *tcache = tcache_get(tsd, true);
     if ((flags & MALLOCX_ARENA_MASK) != 0) {
         unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-        *try_tcache = false;
         *arena = arena_get(tsd, arena_ind, true, true);
         if (unlikely(*arena == NULL))
             return (true);
-    } else {
-        *try_tcache = true;
+    } else
         *arena = NULL;
-    }
     return (false);
 }
 
 JEMALLOC_ALWAYS_INLINE_C bool
 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
-    size_t *alignment, bool *zero, bool *try_tcache, arena_t **arena)
+    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
 {
 
     if (likely(flags == 0)) {
@@ -1909,55 +1917,53 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
         assert(usize != 0);
         *alignment = 0;
         *zero = false;
-        *try_tcache = true;
+        *tcache = tcache_get(tsd, true);
         *arena = NULL;
         return (false);
     } else {
         return (imallocx_flags_decode_hard(tsd, size, flags, usize,
-            alignment, zero, try_tcache, arena));
+            alignment, zero, tcache, arena));
     }
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
-    bool try_tcache, arena_t *arena)
+    tcache_t *tcache, arena_t *arena)
 {
 
-    if (alignment != 0) {
-        return (ipalloct(tsd, usize, alignment, zero, try_tcache,
-            arena));
-    }
+    if (alignment != 0)
+        return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
     if (zero)
-        return (icalloct(tsd, usize, try_tcache, arena));
-    return (imalloct(tsd, usize, try_tcache, arena));
+        return (icalloct(tsd, usize, tcache, arena));
+    return (imalloct(tsd, usize, tcache, arena));
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
 imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
-    size_t alignment, bool zero, bool try_tcache, arena_t *arena)
+    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
 {
 
     if (likely(flags == 0))
         return (imalloc(tsd, size));
-    return (imallocx_flags(tsd, usize, alignment, zero, try_tcache, arena));
+    return (imallocx_flags(tsd, usize, alignment, zero, tcache, arena));
 }
 
 static void *
 imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
-    size_t alignment, bool zero, bool try_tcache, arena_t *arena)
+    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
 {
     void *p;
 
     if (usize <= SMALL_MAXCLASS) {
         assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
             sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
-        p = imalloct(tsd, LARGE_MINCLASS, try_tcache, arena);
+        p = imalloct(tsd, LARGE_MINCLASS, tcache, arena);
         if (p == NULL)
             return (NULL);
         arena_prof_promoted(p, usize);
     } else {
         p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
-            zero, try_tcache, arena);
+            zero, tcache, arena);
     }
 
     return (p);
@@ -1969,20 +1975,20 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
     void *p;
     size_t alignment;
     bool zero;
-    bool try_tcache;
+    tcache_t *tcache;
     arena_t *arena;
     prof_tctx_t *tctx;
 
     if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
-        &zero, &try_tcache, &arena)))
+        &zero, &tcache, &arena)))
         return (NULL);
     tctx = prof_alloc_prep(tsd, *usize, true);
     if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
         p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
-            zero, try_tcache, arena);
+            zero, tcache, arena);
     } else if ((uintptr_t)tctx > (uintptr_t)1U) {
         p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
-            zero, try_tcache, arena);
+            zero, tcache, arena);
     } else
         p = NULL;
     if (unlikely(p == NULL)) {
@@ -1999,7 +2005,7 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
 {
     size_t alignment;
     bool zero;
-    bool try_tcache;
+    tcache_t *tcache;
     arena_t *arena;
 
     if (likely(flags == 0)) {
@@ -2009,10 +2015,9 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
     }
 
     if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
-        &alignment, &zero, &try_tcache, &arena)))
+        &alignment, &zero, &tcache, &arena)))
         return (NULL);
-    return (imallocx_flags(tsd, *usize, alignment, zero, try_tcache,
-        arena));
+    return (imallocx_flags(tsd, *usize, alignment, zero, tcache, arena));
 }
 
 void *
@@ -2053,8 +2058,8 @@ label_oom:
 
 static void *
 irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
-    size_t alignment, size_t usize, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc, arena_t *arena, prof_tctx_t *tctx)
+    size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
+    prof_tctx_t *tctx)
 {
     void *p;
 
@@ -2062,13 +2067,13 @@ irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
         return (NULL);
     if (usize <= SMALL_MAXCLASS) {
         p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
-            zero, try_tcache_alloc, try_tcache_dalloc, arena);
+            zero, tcache, arena);
         if (p == NULL)
             return (NULL);
         arena_prof_promoted(p, usize);
     } else {
         p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
-            try_tcache_alloc, try_tcache_dalloc, arena);
+            tcache, arena);
     }
 
     return (p);
@@ -2076,8 +2081,8 @@ irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
 
 JEMALLOC_ALWAYS_INLINE_C void *
 irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
-    size_t alignment, size_t *usize, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc, arena_t *arena)
+    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+    arena_t *arena)
 {
     void *p;
     prof_tctx_t *old_tctx, *tctx;
@@ -2086,11 +2091,10 @@ irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
     tctx = prof_alloc_prep(tsd, *usize, false);
     if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
         p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
-            alignment, *usize, zero, try_tcache_alloc,
-            try_tcache_dalloc, arena, tctx);
+            alignment, *usize, zero, tcache, arena, tctx);
     } else {
         p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
-            try_tcache_alloc, try_tcache_dalloc, arena);
+            tcache, arena);
     }
     if (unlikely(p == NULL)) {
         prof_alloc_rollback(tsd, tctx, false);
@@ -2123,8 +2127,8 @@ je_rallocx(void *ptr, size_t size, int flags)
     UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
     size_t alignment = MALLOCX_ALIGN_GET(flags);
     bool zero = flags & MALLOCX_ZERO;
-    bool try_tcache_alloc, try_tcache_dalloc;
     arena_t *arena;
+    tcache_t *tcache;
 
     assert(ptr != NULL);
     assert(size != 0);
@@ -2134,18 +2138,19 @@ je_rallocx(void *ptr, size_t size, int flags)
 
     if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
         unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-        arena_chunk_t *chunk;
-        try_tcache_alloc = false;
-        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
         arena = arena_get(tsd, arena_ind, true, true);
         if (unlikely(arena == NULL))
             goto label_oom;
-        try_tcache_dalloc = (chunk == ptr || chunk->arena != arena);
-    } else {
-        try_tcache_alloc = true;
-        try_tcache_dalloc = true;
+    } else
         arena = NULL;
-    }
+
+    if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+            tcache = NULL;
+        else
+            tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+    } else
+        tcache = tcache_get(tsd, true);
 
     old_usize = isalloc(ptr, config_prof);
     if (config_valgrind && unlikely(in_valgrind))
@@ -2155,12 +2160,12 @@ je_rallocx(void *ptr, size_t size, int flags)
         usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
         assert(usize != 0);
         p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
-            zero, try_tcache_alloc, try_tcache_dalloc, arena);
+            zero, tcache, arena);
         if (unlikely(p == NULL))
             goto label_oom;
     } else {
         p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
-            try_tcache_alloc, try_tcache_dalloc, arena);
+            tcache, arena);
         if (unlikely(p == NULL))
             goto label_oom;
         if (config_stats || (config_valgrind && unlikely(in_valgrind)))
@@ -2319,28 +2324,22 @@ void
 je_dallocx(void *ptr, int flags)
 {
     tsd_t *tsd;
-    bool try_tcache;
+    tcache_t *tcache;
 
     assert(ptr != NULL);
     assert(malloc_initialized() || IS_INITIALIZER);
 
     tsd = tsd_fetch();
-    if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
-        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-        arena_t *arena = arena_get(tsd, arena_ind, true, true);
-        /*
-         * If arena is NULL, the application passed an arena that has
-         * never been used before, which is unsupported during
-         * deallocation.
-         */
-        assert(arena != NULL);
-        try_tcache = (chunk == ptr || chunk->arena != arena);
+    if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+            tcache = NULL;
+        else
+            tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
     } else
-        try_tcache = true;
+        tcache = tcache_get(tsd, false);
 
     UTRACE(ptr, 0, 0);
-    ifree(tsd_fetch(), ptr, try_tcache);
+    ifree(tsd_fetch(), ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE_C size_t
@@ -2360,7 +2359,7 @@ void
 je_sdallocx(void *ptr, size_t size, int flags)
 {
     tsd_t *tsd;
-    bool try_tcache;
+    tcache_t *tcache;
     size_t usize;
 
     assert(ptr != NULL);
@@ -2369,21 +2368,16 @@ je_sdallocx(void *ptr, size_t size, int flags)
     assert(usize == isalloc(ptr, config_prof));
 
     tsd = tsd_fetch();
-    if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
-        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
-        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-        arena_t *arena = arena_get(tsd, arena_ind, true, true);
-        /*
-         * If arena is NULL, the application passed an arena that has
-         * never been used before, which is unsupported during
-         * deallocation.
-         */
-        try_tcache = (chunk == ptr || chunk->arena != arena);
+    if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+            tcache = NULL;
+        else
+            tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
     } else
-        try_tcache = true;
+        tcache = tcache_get(tsd, false);
 
     UTRACE(ptr, 0, 0);
-    isfree(tsd, ptr, usize, try_tcache);
+    isfree(tsd, ptr, usize, tcache);
 }
 
 size_t
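The decode logic above gives explicit MALLOCX_TCACHE() bits precedence over
the automatic per-thread cache. Restated as a standalone helper
(decode_tcache is a hypothetical name, not part of the patch):

static tcache_t *
decode_tcache(tsd_t *tsd, int flags, bool is_alloc)
{
    if ((flags & MALLOCX_TCACHE_MASK) != 0) {
        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
            return (NULL);  /* Explicitly uncached. */
        /* An explicit cache created via the tcache.create mallctl. */
        return (tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)));
    }
    /* Default: the automatic per-thread cache (created on alloc only). */
    return (tcache_get(tsd, is_alloc));
}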
src/prof.c | 35 lines changed

@@ -540,7 +540,8 @@ prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
      * Create a single allocation that has space for vec of length bt->len.
      */
     prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
-        vec) + (bt->len * sizeof(void *)), false, true, true, NULL);
+        vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
+        true, NULL);
     if (gctx == NULL)
         return (NULL);
     gctx->lock = prof_gctx_mutex_choose();
@@ -581,7 +582,7 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
         prof_leave(tsd, tdata_self);
         /* Destroy gctx. */
         malloc_mutex_unlock(gctx->lock);
-        idalloctm(tsd, gctx, true, true);
+        idalloctm(tsd, gctx, tcache_get(tsd, false), true);
     } else {
         /*
          * Compensate for increment in prof_tctx_destroy() or
@@ -681,7 +682,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
         prof_tdata_destroy(tsd, tdata, false);
 
     if (destroy_tctx)
-        idalloctm(tsd, tctx, true, true);
+        idalloctm(tsd, tctx, tcache_get(tsd, false), true);
 }
 
 static bool
@@ -710,7 +711,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
         if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
             /* OOM. */
             prof_leave(tsd, tdata);
-            idalloctm(tsd, gctx.v, true, true);
+            idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
             return (true);
         }
         new_gctx = true;
@@ -754,6 +755,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
         ret.p->prepared = true;
     malloc_mutex_unlock(tdata->lock);
     if (not_found) {
+        tcache_t *tcache;
         void *btkey;
         prof_gctx_t *gctx;
         bool new_gctx, error;
@@ -767,7 +769,8 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
             return (NULL);
 
         /* Link a prof_tctx_t into gctx for this thread. */
-        ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, true, true,
+        tcache = tcache_get(tsd, true);
+        ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
             NULL);
         if (ret.p == NULL) {
             if (new_gctx)
@@ -786,7 +789,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
         if (error) {
             if (new_gctx)
                 prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
-            idalloctm(tsd, ret.v, true, true);
+            idalloctm(tsd, ret.v, tcache, true);
             return (NULL);
         }
         malloc_mutex_lock(gctx->lock);
@@ -1166,7 +1169,8 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
                         to_destroy);
                     tctx_tree_remove(&gctx->tctxs,
                         to_destroy);
-                    idalloctm(tsd, to_destroy, true, true);
+                    idalloctm(tsd, to_destroy,
+                        tcache_get(tsd, false), true);
                 } else
                     next = NULL;
             } while (next != NULL);
@@ -1644,12 +1648,14 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
     char *thread_name, bool active)
 {
     prof_tdata_t *tdata;
+    tcache_t *tcache;
 
     cassert(config_prof);
 
     /* Initialize an empty cache for this thread. */
+    tcache = tcache_get(tsd, true);
     tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
-        true, true, NULL);
+        tcache, true, NULL);
     if (tdata == NULL)
         return (NULL);
 
@@ -1662,7 +1668,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
 
     if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
         prof_bt_hash, prof_bt_keycomp)) {
-        idalloctm(tsd, tdata, true, true);
+        idalloctm(tsd, tdata, tcache, true);
         return (NULL);
     }
 
@@ -1708,16 +1714,18 @@ static void
 prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
     bool even_if_attached)
 {
+    tcache_t *tcache;
 
     assert(prof_tdata_should_destroy(tdata, even_if_attached));
     assert(tsd_prof_tdata_get(tsd) != tdata);
 
     tdata_tree_remove(&tdatas, tdata);
 
+    tcache = tcache_get(tsd, false);
     if (tdata->thread_name != NULL)
-        idalloctm(tsd, tdata->thread_name, true, true);
+        idalloctm(tsd, tdata->thread_name, tcache, true);
     ckh_delete(tsd, &tdata->bt2tctx);
-    idalloctm(tsd, tdata, true, true);
+    idalloctm(tsd, tdata, tcache, true);
 }
 
 static void
@@ -1878,7 +1886,7 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
     if (size == 1)
         return ("");
 
-    ret = iallocztm(tsd, size, false, true, true, NULL);
+    ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
     if (ret == NULL)
         return (NULL);
     memcpy(ret, thread_name, size);
@@ -1910,7 +1918,8 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
         return (EAGAIN);
 
     if (tdata->thread_name != NULL) {
-        idalloctm(tsd, tdata->thread_name, true, true);
+        idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
+            true);
         tdata->thread_name = NULL;
     }
     if (strlen(s) > 0)
src/quarantine.c

@@ -27,8 +27,8 @@ quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
     assert(tsd_nominal(tsd));
 
     quarantine = (quarantine_t *)iallocztm(tsd, offsetof(quarantine_t, objs)
-        + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false, true,
-        true, NULL);
+        + ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)), false,
+        tcache_get(tsd, true), true, NULL);
     if (quarantine == NULL)
         return (NULL);
     quarantine->curbytes = 0;
@@ -55,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
     if (tsd_quarantine_get(tsd) == NULL)
         tsd_quarantine_set(tsd, quarantine);
     else
-        idalloctm(tsd, quarantine, true, true);
+        idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
 }
 
 static quarantine_t *
@@ -87,7 +87,7 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
         memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
             sizeof(quarantine_obj_t));
     }
-    idalloctm(tsd, quarantine, true, true);
+    idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
 
     tsd_quarantine_set(tsd, ret);
     return (ret);
@@ -177,7 +177,7 @@ quarantine_cleanup(tsd_t *tsd)
     quarantine = tsd_quarantine_get(tsd);
    if (quarantine != NULL) {
         quarantine_drain(tsd, quarantine, 0);
-        idalloctm(tsd, quarantine, true, true);
+        idalloctm(tsd, quarantine, tcache_get(tsd, false), true);
         tsd_quarantine_set(tsd, NULL);
     }
 }
src/tcache.c | 166 lines changed

@@ -13,6 +13,14 @@ static unsigned stack_nelms; /* Total stack elms per tcache. */
 size_t nhbins;
 size_t tcache_maxclass;
 
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
+
 /******************************************************************************/
 
 size_t tcache_salloc(const void *ptr)
@@ -22,7 +30,7 @@ size_t tcache_salloc(const void *ptr)
 }
 
 void
-tcache_event_hard(tcache_t *tcache)
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 {
     index_t binind = tcache->next_gc_bin;
     tcache_bin_t *tbin = &tcache->tbins[binind];
@@ -33,11 +41,11 @@ tcache_event_hard(tcache_t *tcache)
          * Flush (ceiling) 3/4 of the objects below the low water mark.
          */
         if (binind < NBINS) {
-            tcache_bin_flush_small(tbin, binind, tbin->ncached -
-                tbin->low_water + (tbin->low_water >> 2), tcache);
+            tcache_bin_flush_small(tsd, tbin, binind, tbin->ncached
+                - tbin->low_water + (tbin->low_water >> 2), tcache);
         } else {
-            tcache_bin_flush_large(tbin, binind, tbin->ncached -
-                tbin->low_water + (tbin->low_water >> 2), tcache);
+            tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+                - tbin->low_water + (tbin->low_water >> 2), tcache);
         }
         /*
          * Reduce fill count by 2X.  Limit lg_fill_div such that the
@@ -62,11 +70,12 @@ tcache_event_hard(tcache_t *tcache)
 }
 
 void *
-tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, index_t binind)
+tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    index_t binind)
 {
     void *ret;
 
-    arena_tcache_fill_small(tcache->arena, tbin, binind,
+    arena_tcache_fill_small(arena_choose(tsd, NULL), tbin, binind,
         config_prof ? tcache->prof_accumbytes : 0);
     if (config_prof)
         tcache->prof_accumbytes = 0;
@@ -76,9 +85,10 @@ tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, index_t binind)
 }
 
 void
-tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
-    tcache_t *tcache)
+tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+    unsigned rem, tcache_t *tcache)
 {
+    arena_t *arena;
     void *ptr;
     unsigned i, nflush, ndeferred;
     bool merged_stats = false;
@@ -86,21 +96,23 @@ tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
     assert(binind < NBINS);
     assert(rem <= tbin->ncached);
 
+    arena = arena_choose(tsd, NULL);
+    assert(arena != NULL);
     for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
         /* Lock the arena bin associated with the first object. */
         arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
             tbin->avail[0]);
-        arena_t *arena = chunk->arena;
-        arena_bin_t *bin = &arena->bins[binind];
+        arena_t *bin_arena = chunk->arena;
+        arena_bin_t *bin = &bin_arena->bins[binind];
 
-        if (config_prof && arena == tcache->arena) {
+        if (config_prof && bin_arena == arena) {
             if (arena_prof_accum(arena, tcache->prof_accumbytes))
                 prof_idump();
             tcache->prof_accumbytes = 0;
         }
 
         malloc_mutex_lock(&bin->lock);
-        if (config_stats && arena == tcache->arena) {
+        if (config_stats && bin_arena == arena) {
             assert(!merged_stats);
             merged_stats = true;
             bin->stats.nflushes++;
@@ -112,12 +124,12 @@ tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
             ptr = tbin->avail[i];
             assert(ptr != NULL);
             chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-            if (chunk->arena == arena) {
+            if (chunk->arena == bin_arena) {
                 size_t pageind = ((uintptr_t)ptr -
                     (uintptr_t)chunk) >> LG_PAGE;
                 arena_chunk_map_bits_t *bitselm =
                     arena_bitselm_get(chunk, pageind);
-                arena_dalloc_bin_junked_locked(arena, chunk,
+                arena_dalloc_bin_junked_locked(bin_arena, chunk,
                     ptr, bitselm);
             } else {
                 /*
@@ -137,7 +149,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
          * The flush loop didn't happen to flush to this thread's
          * arena, so the stats didn't get merged.  Manually do so now.
          */
-        arena_bin_t *bin = &tcache->arena->bins[binind];
+        arena_bin_t *bin = &arena->bins[binind];
         malloc_mutex_lock(&bin->lock);
         bin->stats.nflushes++;
         bin->stats.nrequests += tbin->tstats.nrequests;
@@ -153,9 +165,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
 }
 
 void
-tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
-    tcache_t *tcache)
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+    unsigned rem, tcache_t *tcache)
 {
+    arena_t *arena;
     void *ptr;
     unsigned i, nflush, ndeferred;
     bool merged_stats = false;
@@ -163,17 +176,19 @@ tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
     assert(binind < nhbins);
    assert(rem <= tbin->ncached);
 
+    arena = arena_choose(tsd, NULL);
+    assert(arena != NULL);
     for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
         /* Lock the arena associated with the first object. */
         arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
             tbin->avail[0]);
-        arena_t *arena = chunk->arena;
+        arena_t *locked_arena = chunk->arena;
         UNUSED bool idump;
 
         if (config_prof)
             idump = false;
-        malloc_mutex_lock(&arena->lock);
-        if ((config_prof || config_stats) && arena == tcache->arena) {
+        malloc_mutex_lock(&locked_arena->lock);
+        if ((config_prof || config_stats) && locked_arena == arena) {
             if (config_prof) {
                 idump = arena_prof_accum_locked(arena,
                     tcache->prof_accumbytes);
@@ -193,9 +208,9 @@ tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
             ptr = tbin->avail[i];
             assert(ptr != NULL);
             chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-            if (chunk->arena == arena) {
-                arena_dalloc_large_junked_locked(arena, chunk,
-                    ptr);
+            if (chunk->arena == locked_arena) {
+                arena_dalloc_large_junked_locked(locked_arena,
+                    chunk, ptr);
             } else {
                 /*
                  * This object was allocated via a different
@@ -207,7 +222,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
                 ndeferred++;
             }
         }
-        malloc_mutex_unlock(&arena->lock);
+        malloc_mutex_unlock(&locked_arena->lock);
         if (config_prof && idump)
             prof_idump();
     }
@@ -216,7 +231,6 @@ tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
          * The flush loop didn't happen to flush to this thread's
          * arena, so the stats didn't get merged.  Manually do so now.
          */
-        arena_t *arena = tcache->arena;
         malloc_mutex_lock(&arena->lock);
         arena->stats.nrequests_large += tbin->tstats.nrequests;
         arena->stats.lstats[binind - NBINS].nrequests +=
@@ -243,27 +257,37 @@ tcache_arena_associate(tcache_t *tcache, arena_t *arena)
         ql_tail_insert(&arena->tcache_ql, tcache, link);
         malloc_mutex_unlock(&arena->lock);
     }
     tcache->arena = arena;
 }
 
 void
-tcache_arena_reassociate(tcache_t *tcache, arena_t *arena)
+tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
 {
 
-    tcache_arena_dissociate(tcache);
-    tcache_arena_associate(tcache, arena);
+    tcache_arena_dissociate(tcache, oldarena);
+    tcache_arena_associate(tcache, newarena);
 }
 
 void
-tcache_arena_dissociate(tcache_t *tcache)
+tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
 {
 
     if (config_stats) {
         /* Unlink from list of extant tcaches. */
-        malloc_mutex_lock(&tcache->arena->lock);
-        ql_remove(&tcache->arena->tcache_ql, tcache, link);
-        tcache_stats_merge(tcache, tcache->arena);
-        malloc_mutex_unlock(&tcache->arena->lock);
+        malloc_mutex_lock(&arena->lock);
+        if (config_debug) {
+            bool in_ql = false;
+            tcache_t *iter;
+            ql_foreach(iter, &arena->tcache_ql, link) {
+                if (iter == tcache) {
+                    in_ql = true;
+                    break;
+                }
+            }
+            assert(in_ql);
+        }
+        ql_remove(&arena->tcache_ql, tcache, link);
+        tcache_stats_merge(tcache, arena);
+        malloc_mutex_unlock(&arena->lock);
     }
 }
 
@@ -298,7 +322,7 @@ tcache_create(tsd_t *tsd, arena_t *arena)
     /* Avoid false cacheline sharing. */
     size = sa2u(size, CACHELINE);
 
-    tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, arena);
+    tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
     if (tcache == NULL)
         return (NULL);
 
@@ -318,16 +342,17 @@ tcache_create(tsd_t *tsd, arena_t *arena)
 static void
 tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 {
+    arena_t *arena;
     unsigned i;
 
-    tcache_arena_dissociate(tcache);
+    arena = arena_choose(tsd, NULL);
+    tcache_arena_dissociate(tcache, arena);
 
     for (i = 0; i < NBINS; i++) {
         tcache_bin_t *tbin = &tcache->tbins[i];
-        tcache_bin_flush_small(tbin, i, 0, tcache);
+        tcache_bin_flush_small(tsd, tbin, i, 0, tcache);
 
         if (config_stats && tbin->tstats.nrequests != 0) {
-            arena_t *arena = tcache->arena;
             arena_bin_t *bin = &arena->bins[i];
             malloc_mutex_lock(&bin->lock);
             bin->stats.nrequests += tbin->tstats.nrequests;
@@ -337,10 +362,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
     for (; i < nhbins; i++) {
         tcache_bin_t *tbin = &tcache->tbins[i];
-        tcache_bin_flush_large(tbin, i, 0, tcache);
+        tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
 
         if (config_stats && tbin->tstats.nrequests != 0) {
-            arena_t *arena = tcache->arena;
             malloc_mutex_lock(&arena->lock);
             arena->stats.nrequests_large += tbin->tstats.nrequests;
             arena->stats.lstats[i - NBINS].nrequests +=
@@ -350,7 +374,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
     }
 
     if (config_prof && tcache->prof_accumbytes > 0 &&
-        arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+        arena_prof_accum(arena, tcache->prof_accumbytes))
         prof_idump();
 
     idalloctm(tsd, tcache, false, true);
@@ -404,6 +428,66 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
     }
 }
 
+bool
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
+{
+    tcache_t *tcache;
+    tcaches_t *elm;
+
+    if (tcaches == NULL) {
+        tcaches = base_alloc(sizeof(tcache_t *) *
+            (MALLOCX_TCACHE_MAX+1));
+        if (tcaches == NULL)
+            return (true);
+    }
+
+    if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+        return (true);
+    tcache = tcache_create(tsd, a0get());
+    if (tcache == NULL)
+        return (true);
+
+    if (tcaches_avail != NULL) {
+        elm = tcaches_avail;
+        tcaches_avail = tcaches_avail->next;
+        elm->tcache = tcache;
+        *r_ind = (elm - tcaches) / sizeof(tcaches_t);
+    } else {
+        elm = &tcaches[tcaches_past];
+        elm->tcache = tcache;
+        *r_ind = tcaches_past;
+        tcaches_past++;
+    }
+
+    return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+    if (elm->tcache == NULL)
+        return;
+    tcache_destroy(tsd, elm->tcache);
+    elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+    tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+    tcaches_t *elm = &tcaches[ind];
+
+    tcaches_elm_flush(tsd, elm);
+    elm->next = tcaches_avail;
+    tcaches_avail = elm;
+}
+
 bool
 tcache_boot(void)
 {
 
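tcaches_destroy() pushes the freed element onto the tcaches_avail list, so
indices can be recycled by later tcache.create calls; the table is bounded
by MALLOCX_TCACHE_MAX so that an index always fits in the MALLOCX_TCACHE()
flag bits. A sketch of the implied recycling (assumed behavior, not
asserted by the patch):

unsigned a, b;
size_t sz = sizeof(unsigned);
mallctl("tcache.create", &a, &sz, NULL, 0);
mallctl("tcache.destroy", NULL, NULL, &a, sizeof(a));
mallctl("tcache.create", &b, &sz, NULL, 0);  /* b may reuse a's slot. */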