Convert all tsd variables to reside in a single tsd structure.
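The change threads an explicit tsd_t * through the allocation paths instead of looking up each thread-specific datum (arenas, thread_allocated, prof_tdata, ...) in its own TSD slot. As a reading aid, here is a minimal C sketch of the pattern, with a simplified field set and accessors; it is not the commit's actual tsd.h:

/*
 * Before: one malloc_tsd_data() slot per variable, each with its own
 * boot/get/set functions.  After: a single struct in one TSD slot,
 * fetched once per operation via tsd_tryget() and passed down explicitly.
 */
typedef struct tsd_s tsd_t;
struct tsd_s {
	uint64_t	thread_allocated;	/* was thread_allocated_tsd */
	uint64_t	thread_deallocated;
	arena_t		*arena;			/* was arenas_tsd */
	tcache_t	*tcache;		/* was tcache_tsd */
	prof_tdata_t	*prof_tdata;		/* was prof_tdata_tsd */
};

tsd_t	*tsd_tryget(void);	/* NULL while TSD is unusable, e.g. during
				 * bootstrap or thread teardown */

/* Typed accessors replace the old per-variable *_tsd_get() calls, e.g.: */
#define	tsd_thread_allocatedp_get(tsd)	(&(tsd)->thread_allocated)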
src/arena.c (23 changed lines)

@@ -2058,7 +2058,7 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 }

 void *
-arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
     bool try_tcache_dalloc)
 {
@@ -2078,9 +2078,12 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 		size_t usize = sa2u(size + extra, alignment);
 		if (usize == 0)
 			return (NULL);
-		ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
-	} else
-		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
+		ret = ipalloct(tsd, usize, alignment, zero, try_tcache_alloc,
+		    arena);
+	} else {
+		ret = arena_malloc(tsd, arena, size + extra, zero,
+		    try_tcache_alloc);
+	}

 	if (ret == NULL) {
 		if (extra == 0)
@@ -2090,10 +2093,12 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 			size_t usize = sa2u(size, alignment);
 			if (usize == 0)
 				return (NULL);
-			ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
-			    arena);
-		} else
-			ret = arena_malloc(arena, size, zero, try_tcache_alloc);
+			ret = ipalloct(tsd, usize, alignment, zero,
+			    try_tcache_alloc, arena);
+		} else {
+			ret = arena_malloc(tsd, arena, size, zero,
+			    try_tcache_alloc);
+		}

 		if (ret == NULL)
 			return (NULL);
@@ -2108,7 +2113,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 	copysize = (size < oldsize) ? size : oldsize;
 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
 	memcpy(ret, ptr, copysize);
-	iqalloc(ptr, try_tcache_dalloc);
+	iqalloc(tsd, ptr, try_tcache_dalloc);
 	return (ret);
 }
src/ckh.c (38 changed lines)

@@ -40,8 +40,8 @@
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */

-static bool	ckh_grow(ckh_t *ckh);
-static void	ckh_shrink(ckh_t *ckh);
+static bool	ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void	ckh_shrink(tsd_t *tsd, ckh_t *ckh);

 /******************************************************************************/

@@ -243,7 +243,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
 }

 static bool
-ckh_grow(ckh_t *ckh)
+ckh_grow(tsd_t *tsd, ckh_t *ckh)
 {
 	bool ret;
 	ckhc_t *tab, *ttab;
@@ -270,7 +270,7 @@ ckh_grow(ckh_t *ckh)
 			ret = true;
 			goto label_return;
 		}
-		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+		tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
 		if (tab == NULL) {
 			ret = true;
 			goto label_return;
@@ -282,12 +282,12 @@ ckh_grow(ckh_t *ckh)
 		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

 		if (ckh_rebuild(ckh, tab) == false) {
-			idalloc(tab);
+			idalloc(tsd, tab);
 			break;
 		}

 		/* Rebuilding failed, so back out partially rebuilt table. */
-		idalloc(ckh->tab);
+		idalloc(tsd, ckh->tab);
 		ckh->tab = tab;
 		ckh->lg_curbuckets = lg_prevbuckets;
 	}
@@ -298,7 +298,7 @@ label_return:
 }

 static void
-ckh_shrink(ckh_t *ckh)
+ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 {
 	ckhc_t *tab, *ttab;
 	size_t lg_curcells, usize;
@@ -313,7 +313,7 @@ ckh_shrink(ckh_t *ckh)
 	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
 	if (usize == 0)
 		return;
-	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+	tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
 	if (tab == NULL) {
 		/*
 		 * An OOM error isn't worth propagating, since it doesn't
@@ -328,7 +328,7 @@ ckh_shrink(ckh_t *ckh)
 	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

 	if (ckh_rebuild(ckh, tab) == false) {
-		idalloc(tab);
+		idalloc(tsd, tab);
 #ifdef CKH_COUNT
 		ckh->nshrinks++;
 #endif
@@ -336,7 +336,7 @@ ckh_shrink(ckh_t *ckh)
 	}

 	/* Rebuilding failed, so back out partially rebuilt table. */
-	idalloc(ckh->tab);
+	idalloc(tsd, ckh->tab);
 	ckh->tab = tab;
 	ckh->lg_curbuckets = lg_prevbuckets;
 #ifdef CKH_COUNT
@@ -345,7 +345,8 @@ ckh_shrink(ckh_t *ckh)
 }

 bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+    ckh_keycomp_t *keycomp)
 {
 	bool ret;
 	size_t mincells, usize;
@@ -388,7 +389,7 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 		ret = true;
 		goto label_return;
 	}
-	ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+	ckh->tab = (ckhc_t *)ipalloc(tsd, usize, CACHELINE, true);
 	if (ckh->tab == NULL) {
 		ret = true;
 		goto label_return;
@@ -400,7 +401,7 @@ label_return:
 }

 void
-ckh_delete(ckh_t *ckh)
+ckh_delete(tsd_t *tsd, ckh_t *ckh)
 {

 	assert(ckh != NULL);
@@ -417,7 +418,7 @@ ckh_delete(ckh_t *ckh)
 	    (unsigned long long)ckh->nrelocs);
 #endif

-	idalloc(ckh->tab);
+	idalloc(tsd, ckh->tab);
 	if (config_debug)
 		memset(ckh, 0x5a, sizeof(ckh_t));
 }
@@ -452,7 +453,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
 }

 bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
 {
 	bool ret;

@@ -464,7 +465,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
 #endif

 	while (ckh_try_insert(ckh, &key, &data)) {
-		if (ckh_grow(ckh)) {
+		if (ckh_grow(tsd, ckh)) {
 			ret = true;
 			goto label_return;
 		}
@@ -476,7 +477,8 @@ label_return:
 }

 bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+    void **data)
 {
 	size_t cell;

@@ -497,7 +499,7 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
 	    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
 	    > ckh->lg_minbuckets) {
 		/* Ignore error due to OOM. */
-		ckh_shrink(ckh);
+		ckh_shrink(tsd, ckh);
 	}

 	return (false);
src/ctl.c (93 changed lines)

@@ -565,18 +565,23 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
 static bool
 ctl_grow(void)
 {
+	tsd_t *tsd;
 	ctl_arena_stats_t *astats;
 	arena_t **tarenas;

+	tsd = tsd_tryget();
+	if (tsd == NULL)
+		return (true);
+
 	/* Allocate extended arena stats and arenas arrays. */
-	astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
+	astats = (ctl_arena_stats_t *)imalloc(tsd, (ctl_stats.narenas + 2) *
 	    sizeof(ctl_arena_stats_t));
 	if (astats == NULL)
 		return (true);
-	tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+	tarenas = (arena_t **)imalloc(tsd, (ctl_stats.narenas + 1) *
 	    sizeof(arena_t *));
 	if (tarenas == NULL) {
-		idalloc(astats);
+		idalloc(tsd, astats);
 		return (true);
 	}

@@ -585,8 +590,8 @@ ctl_grow(void)
 	    sizeof(ctl_arena_stats_t));
 	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
 	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
-		idalloc(tarenas);
-		idalloc(astats);
+		idalloc(tsd, tarenas);
+		idalloc(tsd, astats);
 		return (true);
 	}
 	/* Swap merged stats to their new location. */
@@ -623,7 +628,7 @@ ctl_grow(void)
 		 * base_alloc()).
 		 */
 		if (ctl_stats.narenas != narenas_auto)
-			idalloc(arenas_old);
+			idalloc(tsd, arenas_old);
 	}
 	ctl_stats.arenas = astats;
 	ctl_stats.narenas++;
@@ -1105,6 +1110,31 @@ label_return: \
 	return (ret); \
 }

+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+    void *newp, size_t newlen) \
+{ \
+	int ret; \
+	t oldval; \
+	tsd_t *tsd; \
+\
+	if ((c) == false) \
+		return (ENOENT); \
+	READONLY(); \
+	tsd = tsd_tryget(); \
+	if (tsd == NULL) { \
+		ret = EAGAIN; \
+		goto label_return; \
+	} \
+	oldval = (m(tsd)); \
+	READ(oldval, t); \
+\
+	ret = 0; \
+label_return: \
+	return (ret); \
+}
+
 #define CTL_RO_BOOL_CONFIG_GEN(n) \
 static int \
 n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
@@ -1194,10 +1224,15 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen)
 {
 	int ret;
+	tsd_t *tsd;
 	unsigned newind, oldind;

+	tsd = tsd_tryget();
+	if (tsd == NULL)
+		return (EAGAIN);
+
 	malloc_mutex_lock(&ctl_mtx);
-	newind = oldind = choose_arena(NULL)->ind;
+	newind = oldind = choose_arena(tsd, NULL)->ind;
 	WRITE(newind, unsigned);
 	READ(oldind, unsigned);
 	if (newind != oldind) {
@@ -1224,14 +1259,14 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,

 		/* Set new arena association. */
 		if (config_tcache) {
-			tcache_t *tcache;
-			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
-			    (uintptr_t)TCACHE_STATE_MAX) {
+			tcache_t *tcache = tsd_tcache_get(tsd);
+			if (tcache != NULL) {
 				tcache_arena_dissociate(tcache);
 				tcache_arena_associate(tcache, arena);
 			}
 		}
-		arenas_tsd_set(&arena);
+
+		tsd_arena_set(tsd, arena);
 	}

 	ret = 0;
@@ -1240,14 +1275,14 @@ label_return:
 	return (ret);
 }

-CTL_RO_NL_CGEN(config_stats, thread_allocated,
-    thread_allocated_tsd_get()->allocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
-    &thread_allocated_tsd_get()->allocated, uint64_t *)
-CTL_RO_NL_CGEN(config_stats, thread_deallocated,
-    thread_allocated_tsd_get()->deallocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
-    &thread_allocated_tsd_get()->deallocated, uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+    uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+    uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+    uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+    tsd_thread_deallocatedp_get, uint64_t *)

 static int
 thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1305,11 +1340,20 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,

 	oldname = prof_thread_name_get();
 	if (newp != NULL) {
+		tsd_t *tsd;
+
 		if (newlen != sizeof(const char *)) {
 			ret = EINVAL;
 			goto label_return;
 		}
-		if (prof_thread_name_set(*(const char **)newp)) {
+
+		tsd = tsd_tryget();
+		if (tsd == NULL) {
+			ret = EAGAIN;
+			goto label_return;
+		}
+
+		if (prof_thread_name_set(tsd, *(const char **)newp)) {
 			ret = EAGAIN;
 			goto label_return;
 		}
@@ -1675,6 +1719,7 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 {
 	int ret;
 	size_t lg_sample = lg_prof_sample;
+	tsd_t *tsd;

 	if (config_prof == false)
 		return (ENOENT);
@@ -1684,7 +1729,13 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
 	if (lg_sample >= (sizeof(uint64_t) << 3))
 		lg_sample = (sizeof(uint64_t) << 3) - 1;

-	prof_reset(lg_sample);
+	tsd = tsd_tryget();
+	if (tsd == NULL) {
+		ret = EAGAIN;
+		goto label_return;
+	}
+
+	prof_reset(tsd, lg_sample);

 	ret = 0;
label_return:
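For reference, the new CTL_TSD_RO_NL_CGEN macro expands mechanically; for CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, uint64_t) the generated handler is roughly the following (READONLY() and READ() are the pre-existing ctl.c helpers that validate newp and copy oldval out):

static int
thread_allocated_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	uint64_t oldval;
	tsd_t *tsd;

	if ((config_stats) == false)
		return (ENOENT);
	READONLY();
	tsd = tsd_tryget();
	if (tsd == NULL) {
		ret = EAGAIN;
		goto label_return;
	}
	oldval = (tsd_thread_allocated_get(tsd));
	READ(oldval, uint64_t);

	ret = 0;
label_return:
	return (ret);
}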
src/huge.c (21 changed lines)

@@ -13,14 +13,15 @@ static malloc_mutex_t huge_mtx;
 static extent_tree_t	huge;

 void *
-huge_malloc(arena_t *arena, size_t size, bool zero)
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero)
 {

-	return (huge_palloc(arena, size, chunksize, zero));
+	return (huge_palloc(tsd, arena, size, chunksize, zero));
 }

 void *
-huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero)
 {
 	void *ret;
 	size_t csize;
@@ -45,7 +46,7 @@ huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	arena = choose_arena(arena);
+	arena = choose_arena(tsd, arena);
 	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
 	if (ret == NULL) {
 		base_node_dalloc(node);
@@ -90,7 +91,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 }

 void *
-huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
 {
 	void *ret;
@@ -106,18 +107,18 @@ huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 	 * space and copying.
 	 */
 	if (alignment > chunksize)
-		ret = huge_palloc(arena, size + extra, alignment, zero);
+		ret = huge_palloc(tsd, arena, size + extra, alignment, zero);
 	else
-		ret = huge_malloc(arena, size + extra, zero);
+		ret = huge_malloc(tsd, arena, size + extra, zero);

 	if (ret == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
 		if (alignment > chunksize)
-			ret = huge_palloc(arena, size, alignment, zero);
+			ret = huge_palloc(tsd, arena, size, alignment, zero);
 		else
-			ret = huge_malloc(arena, size, zero);
+			ret = huge_malloc(tsd, arena, size, zero);

 		if (ret == NULL)
 			return (NULL);
@@ -129,7 +130,7 @@ huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 	 */
 	copysize = (size < oldsize) ? size : oldsize;
 	memcpy(ret, ptr, copysize);
-	iqalloc(ptr, try_tcache_dalloc);
+	iqalloc(tsd, ptr, try_tcache_dalloc);
 	return (ret);
 }
src/jemalloc.c (366 changed lines)

@@ -5,8 +5,6 @@
 /* Data. */

-malloc_tsd_data(, arenas, arena_t *, NULL)
-malloc_tsd_data(, thread_allocated, thread_allocated_t,
-    THREAD_ALLOCATED_INITIALIZER)

 /* Runtime configuration options. */
 const char	*je_malloc_conf;
@@ -142,7 +140,7 @@ arenas_extend(unsigned ind)

 /* Slow path, called only by choose_arena(). */
 arena_t *
-choose_arena_hard(void)
+choose_arena_hard(tsd_t *tsd)
 {
 	arena_t *ret;

@@ -196,11 +194,32 @@ choose_arena_hard(void)
 		malloc_mutex_unlock(&arenas_lock);
 	}

-	arenas_tsd_set(&ret);
+	tsd_arena_set(tsd, ret);

 	return (ret);
 }

+void
+thread_allocated_cleanup(tsd_t *tsd)
+{
+
+	/* Do nothing. */
+}
+
+void
+thread_deallocated_cleanup(tsd_t *tsd)
+{
+
+	/* Do nothing. */
+}
+
+void
+arena_cleanup(tsd_t *tsd)
+{
+
+	/* Do nothing. */
+}
+
 static void
 stats_print_atexit(void)
 {
@@ -691,7 +710,11 @@ malloc_init_hard(void)
 #endif
 	malloc_initializer = INITIALIZER;

-	malloc_tsd_boot();
+	if (malloc_tsd_boot()) {
+		malloc_mutex_unlock(&init_lock);
+		return (true);
+	}
+
 	if (config_prof)
 		prof_boot0();

@@ -726,7 +749,7 @@ malloc_init_hard(void)

 	arena_boot();

-	if (config_tcache && tcache_boot0()) {
+	if (config_tcache && tcache_boot()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
 	}
@@ -759,27 +782,6 @@ malloc_init_hard(void)
 		return (true);
 	}

-	/* Initialize allocation counters before any allocations can occur. */
-	if (config_stats && thread_allocated_tsd_boot()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
-
-	if (arenas_tsd_boot()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
-
-	if (config_tcache && tcache_boot1()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
-
-	if (config_fill && quarantine_boot()) {
-		malloc_mutex_unlock(&init_lock);
-		return (true);
-	}
-
 	if (config_prof && prof_boot2()) {
 		malloc_mutex_unlock(&init_lock);
 		return (true);
@@ -863,36 +865,36 @@
 	 */

 static void *
-imalloc_prof_sample(size_t usize, prof_tctx_t *tctx)
+imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
 {
 	void *p;

 	if (tctx == NULL)
 		return (NULL);
 	if (usize <= SMALL_MAXCLASS) {
-		p = imalloc(LARGE_MINCLASS);
+		p = imalloc(tsd, LARGE_MINCLASS);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else
-		p = imalloc(usize);
+		p = imalloc(tsd, usize);

 	return (p);
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_prof(size_t usize)
+imalloc_prof(tsd_t *tsd, size_t usize)
 {
 	void *p;
 	prof_tctx_t *tctx;

-	tctx = prof_alloc_prep(usize, true);
+	tctx = prof_alloc_prep(tsd, usize, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-		p = imalloc_prof_sample(usize, tctx);
+		p = imalloc_prof_sample(tsd, usize, tctx);
 	else
-		p = imalloc(usize);
+		p = imalloc(tsd, usize);
 	if (p == NULL) {
-		prof_alloc_rollback(tctx, true);
+		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
 	prof_malloc(p, usize, tctx);
@@ -901,32 +903,33 @@ imalloc_prof(size_t usize)
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_body(size_t size, size_t *usize)
+imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
 {

-	if (unlikely(malloc_init()))
+	if (unlikely(malloc_init()) || unlikely((*tsd = tsd_tryget()) == NULL))
 		return (NULL);

 	if (config_prof && opt_prof) {
 		*usize = s2u(size);
-		return (imalloc_prof(*usize));
+		return (imalloc_prof(*tsd, *usize));
 	}

 	if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 		*usize = s2u(size);
-	return (imalloc(size));
+	return (imalloc(*tsd, size));
 }

 void *
 je_malloc(size_t size)
 {
 	void *ret;
+	tsd_t *tsd;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

 	if (size == 0)
 		size = 1;

-	ret = imalloc_body(size, &usize);
+	ret = imalloc_body(size, &tsd, &usize);
 	if (unlikely(ret == NULL)) {
 		if (config_xmalloc && unlikely(opt_xmalloc)) {
 			malloc_write("<jemalloc>: Error in malloc(): "
@@ -937,7 +940,7 @@ je_malloc(size_t size)
 	}
 	if (config_stats && likely(ret != NULL)) {
 		assert(usize == isalloc(ret, config_prof));
-		thread_allocated_tsd_get()->allocated += usize;
+		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, ret);
 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
@@ -945,7 +948,8 @@
 }

 static void *
-imemalign_prof_sample(size_t alignment, size_t usize, prof_tctx_t *tctx)
+imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
+    prof_tctx_t *tctx)
 {
 	void *p;

@@ -953,29 +957,29 @@ imemalign_prof_sample(size_t alignment, size_t usize, prof_tctx_t *tctx)
 		return (NULL);
 	if (usize <= SMALL_MAXCLASS) {
 		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
-		p = imalloc(LARGE_MINCLASS);
+		p = imalloc(tsd, LARGE_MINCLASS);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else
-		p = ipalloc(usize, alignment, false);
+		p = ipalloc(tsd, usize, alignment, false);

 	return (p);
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imemalign_prof(size_t alignment, size_t usize)
+imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
 {
 	void *p;
 	prof_tctx_t *tctx;

-	tctx = prof_alloc_prep(usize, true);
+	tctx = prof_alloc_prep(tsd, usize, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-		p = imemalign_prof_sample(alignment, usize, tctx);
+		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
 	else
-		p = ipalloc(usize, alignment, false);
+		p = ipalloc(tsd, usize, alignment, false);
 	if (p == NULL) {
-		prof_alloc_rollback(tctx, true);
+		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
 	prof_malloc(p, usize, tctx);
@@ -988,12 +992,13 @@ static int
 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 {
 	int ret;
+	tsd_t *tsd;
 	size_t usize;
 	void *result;

 	assert(min_alignment != 0);

-	if (unlikely(malloc_init())) {
+	if (unlikely(malloc_init()) || unlikely((tsd = tsd_tryget()) == NULL)) {
 		result = NULL;
 		goto label_oom;
 	} else {
@@ -1020,9 +1025,9 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 		}

 		if (config_prof && opt_prof)
-			result = imemalign_prof(alignment, usize);
+			result = imemalign_prof(tsd, alignment, usize);
 		else
-			result = ipalloc(usize, alignment, false);
+			result = ipalloc(tsd, usize, alignment, false);
 		if (unlikely(result == NULL))
 			goto label_oom;
 	}
@@ -1032,7 +1037,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
label_return:
 	if (config_stats && likely(result != NULL)) {
 		assert(usize == isalloc(result, config_prof));
-		thread_allocated_tsd_get()->allocated += usize;
+		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, result);
 	return (ret);
@@ -1072,36 +1077,36 @@ je_aligned_alloc(size_t alignment, size_t size)
 }

 static void *
-icalloc_prof_sample(size_t usize, prof_tctx_t *tctx)
+icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
 {
 	void *p;

 	if (tctx == NULL)
 		return (NULL);
 	if (usize <= SMALL_MAXCLASS) {
-		p = icalloc(LARGE_MINCLASS);
+		p = icalloc(tsd, LARGE_MINCLASS);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else
-		p = icalloc(usize);
+		p = icalloc(tsd, usize);

 	return (p);
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-icalloc_prof(size_t usize)
+icalloc_prof(tsd_t *tsd, size_t usize)
 {
 	void *p;
 	prof_tctx_t *tctx;

-	tctx = prof_alloc_prep(usize, true);
+	tctx = prof_alloc_prep(tsd, usize, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-		p = icalloc_prof_sample(usize, tctx);
+		p = icalloc_prof_sample(tsd, usize, tctx);
 	else
-		p = icalloc(usize);
+		p = icalloc(tsd, usize);
 	if (p == NULL) {
-		prof_alloc_rollback(tctx, true);
+		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
 	prof_malloc(p, usize, tctx);
@@ -1113,10 +1118,11 @@ void *
 je_calloc(size_t num, size_t size)
 {
 	void *ret;
+	tsd_t *tsd;
 	size_t num_size;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

-	if (unlikely(malloc_init())) {
+	if (unlikely(malloc_init()) || unlikely((tsd = tsd_tryget()) == NULL)) {
 		num_size = 0;
 		ret = NULL;
 		goto label_return;
@@ -1144,11 +1150,11 @@ je_calloc(size_t num, size_t size)

 	if (config_prof && opt_prof) {
 		usize = s2u(num_size);
-		ret = icalloc_prof(usize);
+		ret = icalloc_prof(tsd, usize);
 	} else {
 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 			usize = s2u(num_size);
-		ret = icalloc(num_size);
+		ret = icalloc(tsd, num_size);
 	}

label_return:
@@ -1162,7 +1168,7 @@ label_return:
 	}
 	if (config_stats && likely(ret != NULL)) {
 		assert(usize == isalloc(ret, config_prof));
-		thread_allocated_tsd_get()->allocated += usize;
+		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, num_size, ret);
 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
@@ -1170,44 +1176,44 @@ label_return:
 }

 static void *
-irealloc_prof_sample(void *oldptr, size_t usize, prof_tctx_t *tctx)
+irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t usize, prof_tctx_t *tctx)
 {
 	void *p;

 	if (tctx == NULL)
 		return (NULL);
 	if (usize <= SMALL_MAXCLASS) {
-		p = iralloc(oldptr, LARGE_MINCLASS, 0, false);
+		p = iralloc(tsd, oldptr, LARGE_MINCLASS, 0, false);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else
-		p = iralloc(oldptr, usize, 0, false);
+		p = iralloc(tsd, oldptr, usize, 0, false);

 	return (p);
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(void *oldptr, size_t old_usize, size_t usize)
+irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
 {
 	void *p;
 	prof_tctx_t *old_tctx, *tctx;

 	old_tctx = prof_tctx_get(oldptr);
-	tctx = prof_alloc_prep(usize, true);
+	tctx = prof_alloc_prep(tsd, usize, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-		p = irealloc_prof_sample(oldptr, usize, tctx);
+		p = irealloc_prof_sample(tsd, oldptr, usize, tctx);
 	else
-		p = iralloc(oldptr, usize, 0, false);
+		p = iralloc(tsd, oldptr, usize, 0, false);
 	if (p == NULL)
 		return (NULL);
-	prof_realloc(p, usize, tctx, true, old_usize, old_tctx);
+	prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);

 	return (p);
 }

 JEMALLOC_INLINE_C void
-ifree(void *ptr, bool try_tcache)
+ifree(tsd_t *tsd, void *ptr, bool try_tcache)
 {
 	size_t usize;
 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
@@ -1217,19 +1223,19 @@ ifree(void *ptr, bool try_tcache)

 	if (config_prof && opt_prof) {
 		usize = isalloc(ptr, config_prof);
-		prof_free(ptr, usize);
+		prof_free(tsd, ptr, usize);
 	} else if (config_stats || config_valgrind)
 		usize = isalloc(ptr, config_prof);
-	if (config_stats)
-		thread_allocated_tsd_get()->deallocated += usize;
+	if (config_stats && likely(tsd != NULL))
+		*tsd_thread_deallocatedp_get(tsd) += usize;
 	if (config_valgrind && unlikely(in_valgrind))
 		rzsize = p2rz(ptr);
-	iqalloc(ptr, try_tcache);
+	iqalloc(tsd, ptr, try_tcache);
 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }

 JEMALLOC_INLINE_C void
-isfree(void *ptr, size_t usize, bool try_tcache)
+isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
 {
 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

@@ -1237,12 +1243,12 @@ isfree(void *ptr, size_t usize, bool try_tcache)
 	assert(malloc_initialized || IS_INITIALIZER);

 	if (config_prof && opt_prof)
-		prof_free(ptr, usize);
-	if (config_stats)
-		thread_allocated_tsd_get()->deallocated += usize;
+		prof_free(tsd, ptr, usize);
+	if (config_stats && likely(tsd != NULL))
+		*tsd_thread_deallocatedp_get(tsd) += usize;
 	if (config_valgrind && unlikely(in_valgrind))
 		rzsize = p2rz(ptr);
-	isqalloc(ptr, usize, try_tcache);
+	isqalloc(tsd, ptr, usize, try_tcache);
 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }

@@ -1250,6 +1256,7 @@ void *
 je_realloc(void *ptr, size_t size)
 {
 	void *ret;
+	tsd_t *tsd;
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t old_usize = 0;
 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
@@ -1258,7 +1265,8 @@ je_realloc(void *ptr, size_t size)
 		if (ptr != NULL) {
 			/* realloc(ptr, 0) is equivalent to free(ptr). */
 			UTRACE(ptr, 0, 0);
-			ifree(ptr, true);
+			tsd = tsd_tryget();
+			ifree(tsd, ptr, true);
 			return (NULL);
 		}
 		size = 1;
@@ -1268,24 +1276,29 @@ je_realloc(void *ptr, size_t size)
 		assert(malloc_initialized || IS_INITIALIZER);
 		malloc_thread_init();

-		if ((config_prof && opt_prof) || config_stats ||
-		    (config_valgrind && unlikely(in_valgrind)))
-			old_usize = isalloc(ptr, config_prof);
-		if (config_valgrind && unlikely(in_valgrind))
-			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
-
-		if (config_prof && opt_prof) {
-			usize = s2u(size);
-			ret = irealloc_prof(ptr, old_usize, usize);
-		} else {
-			if (config_stats || (config_valgrind &&
-			    unlikely(in_valgrind)))
-				usize = s2u(size);
-			ret = iralloc(ptr, size, 0, false);
-		}
+		if ((tsd = tsd_tryget()) != NULL) {
+			if ((config_prof && opt_prof) || config_stats ||
+			    (config_valgrind && unlikely(in_valgrind)))
+				old_usize = isalloc(ptr, config_prof);
+			if (config_valgrind && unlikely(in_valgrind)) {
+				old_rzsize = config_prof ? p2rz(ptr) :
+				    u2rz(old_usize);
+			}
+
+			if (config_prof && opt_prof) {
+				usize = s2u(size);
+				ret = irealloc_prof(tsd, ptr, old_usize, usize);
+			} else {
+				if (config_stats || (config_valgrind &&
+				    unlikely(in_valgrind)))
+					usize = s2u(size);
+				ret = iralloc(tsd, ptr, size, 0, false);
+			}
+		} else
+			ret = NULL;
 	} else {
 		/* realloc(NULL, size) is equivalent to malloc(size). */
-		ret = imalloc_body(size, &usize);
+		ret = imalloc_body(size, &tsd, &usize);
 	}

 	if (unlikely(ret == NULL)) {
@@ -1297,11 +1310,11 @@ je_realloc(void *ptr, size_t size)
 		set_errno(ENOMEM);
 	}
 	if (config_stats && likely(ret != NULL)) {
-		thread_allocated_t *ta;
 		assert(usize == isalloc(ret, config_prof));
-		ta = thread_allocated_tsd_get();
-		ta->allocated += usize;
-		ta->deallocated += old_usize;
+		if (tsd != NULL) {
+			*tsd_thread_allocatedp_get(tsd) += usize;
+			*tsd_thread_deallocatedp_get(tsd) += old_usize;
+		}
 	}
 	UTRACE(ptr, size, ret);
 	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
@@ -1315,7 +1328,7 @@ je_free(void *ptr)

 	UTRACE(ptr, 0, 0);
 	if (likely(ptr != NULL))
-		ifree(ptr, true);
+		ifree(tsd_tryget(), ptr, true);
 }

 /*
@@ -1425,50 +1438,52 @@ imallocx_flags_decode(size_t size, int flags, size_t *usize, size_t *alignment,
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_flags(size_t usize, size_t alignment, bool zero, bool try_tcache,
-    arena_t *arena)
+imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+    bool try_tcache, arena_t *arena)
 {

-	if (alignment != 0)
-		return (ipalloct(usize, alignment, zero, try_tcache, arena));
+	if (alignment != 0) {
+		return (ipalloct(tsd, usize, alignment, zero, try_tcache,
+		    arena));
+	}
 	if (zero)
-		return (icalloct(usize, try_tcache, arena));
-	return (imalloct(usize, try_tcache, arena));
+		return (icalloct(tsd, usize, try_tcache, arena));
+	return (imalloct(tsd, usize, try_tcache, arena));
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_maybe_flags(size_t size, int flags, size_t usize, size_t alignment,
-    bool zero, bool try_tcache, arena_t *arena)
+imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
+    size_t alignment, bool zero, bool try_tcache, arena_t *arena)
 {

 	if (likely(flags == 0))
-		return (imalloc(size));
-	return (imallocx_flags(usize, alignment, zero, try_tcache, arena));
+		return (imalloc(tsd, size));
+	return (imallocx_flags(tsd, usize, alignment, zero, try_tcache, arena));
 }

 static void *
-imallocx_prof_sample(size_t size, int flags, size_t usize, size_t alignment,
-    bool zero, bool try_tcache, arena_t *arena)
+imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
+    size_t alignment, bool zero, bool try_tcache, arena_t *arena)
 {
 	void *p;

 	if (usize <= SMALL_MAXCLASS) {
 		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
 		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
-		p = imalloct(LARGE_MINCLASS, try_tcache, arena);
+		p = imalloct(tsd, LARGE_MINCLASS, try_tcache, arena);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else {
-		p = imallocx_maybe_flags(size, flags, usize, alignment, zero,
-		    try_tcache, arena);
+		p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
+		    zero, try_tcache, arena);
 	}

 	return (p);
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(size_t size, int flags, size_t *usize)
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
 {
 	void *p;
 	size_t alignment;
@@ -1479,17 +1494,17 @@ imallocx_prof(size_t size, int flags, size_t *usize)

 	imallocx_flags_decode(size, flags, usize, &alignment, &zero,
 	    &try_tcache, &arena);
-	tctx = prof_alloc_prep(*usize, true);
+	tctx = prof_alloc_prep(tsd, *usize, true);
 	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
-		p = imallocx_maybe_flags(size, flags, *usize, alignment, zero,
-		    try_tcache, arena);
+		p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
+		    zero, try_tcache, arena);
 	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
-		p = imallocx_prof_sample(size, flags, *usize, alignment, zero,
-		    try_tcache, arena);
+		p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
+		    zero, try_tcache, arena);
 	} else
 		p = NULL;
 	if (unlikely(p == NULL)) {
-		prof_alloc_rollback(tctx, true);
+		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
 	prof_malloc(p, *usize, tctx);
@@ -1498,7 +1513,7 @@ imallocx_prof(size_t size, int flags, size_t *usize)
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_no_prof(size_t size, int flags, size_t *usize)
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
 {
 	size_t alignment;
 	bool zero;
@@ -1508,35 +1523,39 @@ imallocx_no_prof(size_t size, int flags, size_t *usize)
 	if (likely(flags == 0)) {
 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 			*usize = s2u(size);
-		return (imalloc(size));
+		return (imalloc(tsd, size));
 	}

 	imallocx_flags_decode_hard(size, flags, usize, &alignment, &zero,
 	    &try_tcache, &arena);
-	return (imallocx_flags(*usize, alignment, zero, try_tcache, arena));
+	return (imallocx_flags(tsd, *usize, alignment, zero, try_tcache,
+	    arena));
 }

 void *
 je_mallocx(size_t size, int flags)
 {
+	tsd_t *tsd;
 	void *p;
 	size_t usize;

 	assert(size != 0);

-	if (unlikely(malloc_init()))
+	if (unlikely(malloc_init()) || unlikely((tsd = tsd_tryget()) == NULL))
 		goto label_oom;

 	if (config_prof && opt_prof)
-		p = imallocx_prof(size, flags, &usize);
+		p = imallocx_prof(tsd, size, flags, &usize);
 	else
-		p = imallocx_no_prof(size, flags, &usize);
+		p = imallocx_no_prof(tsd, size, flags, &usize);
 	if (unlikely(p == NULL))
 		goto label_oom;

 	if (config_stats) {
+		tsd_t *tsd = tsd_tryget();
 		assert(usize == isalloc(p, config_prof));
-		thread_allocated_tsd_get()->allocated += usize;
+		if (tsd != NULL)
+			*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, p);
 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
@@ -1551,47 +1570,47 @@ label_oom:
 }

 static void *
-irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
-    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
-    prof_tctx_t *tctx)
+irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t size, size_t alignment,
+    size_t usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+    arena_t *arena, prof_tctx_t *tctx)
 {
 	void *p;

 	if (tctx == NULL)
 		return (NULL);
 	if (usize <= SMALL_MAXCLASS) {
-		p = iralloct(oldptr, LARGE_MINCLASS, alignment, zero,
+		p = iralloct(tsd, oldptr, LARGE_MINCLASS, alignment, zero,
 		    try_tcache_alloc, try_tcache_dalloc, arena);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else {
-		p = iralloct(oldptr, size, alignment, zero, try_tcache_alloc,
-		    try_tcache_dalloc, arena);
+		p = iralloct(tsd, oldptr, size, alignment, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena);
 	}

 	return (p);
 }

 JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
-    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
-    arena_t *arena)
+irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
+    size_t alignment, size_t *usize, bool zero, bool try_tcache_alloc,
+    bool try_tcache_dalloc, arena_t *arena)
 {
 	void *p;
 	prof_tctx_t *old_tctx, *tctx;

 	old_tctx = prof_tctx_get(oldptr);
-	tctx = prof_alloc_prep(*usize, false);
+	tctx = prof_alloc_prep(tsd, *usize, false);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
-		    try_tcache_alloc, try_tcache_dalloc, arena, tctx);
+		p = irallocx_prof_sample(tsd, oldptr, size, alignment, *usize,
+		    zero, try_tcache_alloc, try_tcache_dalloc, arena, tctx);
 	} else {
-		p = iralloct(oldptr, size, alignment, zero, try_tcache_alloc,
-		    try_tcache_dalloc, arena);
+		p = iralloct(tsd, oldptr, size, alignment, zero,
+		    try_tcache_alloc, try_tcache_dalloc, arena);
 	}
 	if (unlikely(p == NULL)) {
-		prof_alloc_rollback(tctx, false);
+		prof_alloc_rollback(tsd, tctx, false);
 		return (NULL);
 	}

@@ -1606,7 +1625,7 @@ irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
 	 */
 		*usize = isalloc(p, config_prof);
 	}
-	prof_realloc(p, *usize, tctx, false, old_usize, old_tctx);
+	prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);

 	return (p);
 }
@@ -1615,6 +1634,7 @@ void *
 je_rallocx(void *ptr, size_t size, int flags)
 {
 	void *p;
+	tsd_t *tsd;
 	size_t usize;
 	UNUSED size_t old_usize JEMALLOC_CC_SILENCE_INIT(0);
 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
@@ -1628,6 +1648,9 @@ je_rallocx(void *ptr, size_t size, int flags)
 	assert(malloc_initialized || IS_INITIALIZER);
 	malloc_thread_init();

+	if (unlikely((tsd = tsd_tryget()) == NULL))
+		goto label_oom;
+
 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
 		arena_chunk_t *chunk;
@@ -1651,12 +1674,12 @@ je_rallocx(void *ptr, size_t size, int flags)
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
 		assert(usize != 0);
-		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
-		    try_tcache_alloc, try_tcache_dalloc, arena);
+		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+		    zero, try_tcache_alloc, try_tcache_dalloc, arena);
 		if (unlikely(p == NULL))
 			goto label_oom;
 	} else {
-		p = iralloct(ptr, size, alignment, zero, try_tcache_alloc,
+		p = iralloct(tsd, ptr, size, alignment, zero, try_tcache_alloc,
 		    try_tcache_dalloc, arena);
 		if (unlikely(p == NULL))
 			goto label_oom;
@@ -1665,10 +1688,8 @@ je_rallocx(void *ptr, size_t size, int flags)
 	}

 	if (config_stats) {
-		thread_allocated_t *ta;
-		ta = thread_allocated_tsd_get();
-		ta->allocated += usize;
-		ta->deallocated += old_usize;
+		*tsd_thread_allocatedp_get(tsd) += usize;
+		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, p);
 	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
@@ -1724,8 +1745,8 @@ ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
 }

 JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
-    size_t alignment, bool zero, arena_t *arena)
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+    size_t extra, size_t alignment, bool zero, arena_t *arena)
 {
 	size_t max_usize, usize;
 	prof_tctx_t *old_tctx, *tctx;
@@ -1739,7 +1760,7 @@ ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
 	 */
 	max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
 	    alignment);
-	tctx = prof_alloc_prep(max_usize, false);
+	tctx = prof_alloc_prep(tsd, max_usize, false);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
 		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
 		    alignment, zero, max_usize, arena, tctx);
@@ -1748,10 +1769,10 @@ ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
 		    zero, arena);
 	}
 	if (unlikely(usize == old_usize)) {
-		prof_alloc_rollback(tctx, false);
+		prof_alloc_rollback(tsd, tctx, false);
 		return (usize);
 	}
-	prof_realloc(ptr, usize, tctx, false, old_usize, old_tctx);
+	prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);

 	return (usize);
 }
@@ -1759,6 +1780,7 @@ ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
 size_t
 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 {
+	tsd_t *tsd;
 	size_t usize, old_usize;
 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
@@ -1778,12 +1800,16 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		arena = NULL;

 	old_usize = isalloc(ptr, config_prof);
+	if (unlikely((tsd = tsd_tryget()) == NULL)) {
+		usize = old_usize;
+		goto label_not_resized;
+	}
 	if (config_valgrind && unlikely(in_valgrind))
 		old_rzsize = u2rz(old_usize);

 	if (config_prof && opt_prof) {
-		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
-		    zero, arena);
+		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+		    alignment, zero, arena);
 	} else {
 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
 		    zero, arena);
@@ -1792,10 +1818,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		goto label_not_resized;

 	if (config_stats) {
-		thread_allocated_t *ta;
-		ta = thread_allocated_tsd_get();
-		ta->allocated += usize;
-		ta->deallocated += old_usize;
+		*tsd_thread_allocatedp_get(tsd) += usize;
+		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
 	    old_rzsize, false, zero);
@@ -1839,7 +1863,7 @@ je_dallocx(void *ptr, int flags)
 		try_tcache = true;

 	UTRACE(ptr, 0, 0);
-	ifree(ptr, try_tcache);
+	ifree(tsd_tryget(), ptr, try_tcache);
 }

 JEMALLOC_ALWAYS_INLINE_C size_t
@@ -1875,7 +1899,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
 		try_tcache = true;

 	UTRACE(ptr, 0, 0);
-	isfree(ptr, usize, try_tcache);
+	isfree(tsd_tryget(), ptr, usize, try_tcache);
 }

 size_t
@@ -2072,9 +2096,9 @@ a0alloc(size_t size, bool zero)
 		size = 1;

 	if (size <= arena_maxclass)
-		return (arena_malloc(arenas[0], size, zero, false));
+		return (arena_malloc(NULL, arenas[0], size, zero, false));
 	else
-		return (huge_malloc(NULL, size, zero));
+		return (huge_malloc(NULL, arenas[0], size, zero));
 }

 void *
@@ -2101,7 +2125,7 @@ a0free(void *ptr)

 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	if (chunk != ptr)
-		arena_dalloc(chunk, ptr, false);
+		arena_dalloc(NULL, chunk, ptr, false);
 	else
 		huge_dalloc(ptr);
 }
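A pattern worth noting in the hunks above: a NULL tsd_t * is treated as "per-thread bookkeeping unavailable" rather than as a hard error, so deallocation still proceeds and only the counters are skipped (ifree() guards with likely(tsd != NULL), and je_free() passes tsd_tryget()'s result straight through). A hypothetical caller following the same convention, for illustration only:

void
example_free(void *ptr)	/* not part of the commit */
{
	tsd_t *tsd = tsd_tryget();	/* may legitimately return NULL */

	if (config_stats && likely(tsd != NULL))
		*tsd_thread_deallocatedp_get(tsd) += isalloc(ptr, config_prof);
	iqalloc(tsd, ptr, true);	/* callee tolerates tsd == NULL */
}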
244
src/prof.c
244
src/prof.c
@@ -14,8 +14,6 @@
|
||||
/******************************************************************************/
|
||||
/* Data. */
|
||||
|
||||
malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
|
||||
|
||||
bool opt_prof = false;
|
||||
bool opt_prof_active = true;
|
||||
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
|
||||
@@ -102,9 +100,9 @@ static bool prof_booted = false;
|
||||
*/
|
||||
|
||||
static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
|
||||
static void prof_tctx_destroy(prof_tctx_t *tctx);
|
||||
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
|
||||
static bool prof_tdata_should_destroy(prof_tdata_t *tdata);
|
||||
static void prof_tdata_destroy(prof_tdata_t *tdata);
|
||||
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata);
|
||||
|
||||
/******************************************************************************/
|
||||
/* Red-black trees. */
|
||||
@@ -151,7 +149,7 @@ rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
|
||||
/******************************************************************************/
|
||||
|
||||
void
|
||||
prof_alloc_rollback(prof_tctx_t *tctx, bool updated)
|
||||
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
|
||||
{
|
||||
prof_tdata_t *tdata;
|
||||
|
||||
@@ -164,8 +162,8 @@ prof_alloc_rollback(prof_tctx_t *tctx, bool updated)
|
||||
* potential for sample bias is minimal except in contrived
|
||||
* programs.
|
||||
*/
|
||||
tdata = prof_tdata_get(true);
|
||||
if ((uintptr_t)tdata > (uintptr_t)PROF_TDATA_STATE_MAX)
|
||||
tdata = prof_tdata_get(tsd, true);
|
||||
if (tdata != NULL)
|
||||
prof_sample_threshold_update(tctx->tdata);
|
||||
}
|
||||
|
||||
@@ -173,7 +171,7 @@ prof_alloc_rollback(prof_tctx_t *tctx, bool updated)
|
||||
malloc_mutex_lock(tctx->tdata->lock);
|
||||
tctx->prepared = false;
|
||||
if (prof_tctx_should_destroy(tctx))
|
||||
prof_tctx_destroy(tctx);
|
||||
prof_tctx_destroy(tsd, tctx);
|
||||
else
|
||||
malloc_mutex_unlock(tctx->tdata->lock);
|
||||
}
|
||||
@@ -195,7 +193,7 @@ prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx) {
|
||||
}
|
||||
|
||||
void
|
||||
prof_free_sampled_object(size_t usize, prof_tctx_t *tctx)
|
||||
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
|
||||
{
|
||||
|
||||
malloc_mutex_lock(tctx->tdata->lock);
|
||||
@@ -205,7 +203,7 @@ prof_free_sampled_object(size_t usize, prof_tctx_t *tctx)
|
||||
tctx->cnts.curbytes -= usize;
|
||||
|
||||
if (prof_tctx_should_destroy(tctx))
|
||||
prof_tctx_destroy(tctx);
|
||||
prof_tctx_destroy(tsd, tctx);
|
||||
else
|
||||
malloc_mutex_unlock(tctx->tdata->lock);
|
||||
}
|
||||
@@ -494,13 +492,13 @@ prof_tdata_mutex_choose(uint64_t thr_uid)
|
||||
}
|
||||
|
||||
static prof_gctx_t *
|
||||
prof_gctx_create(prof_bt_t *bt)
|
||||
prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
|
||||
{
|
||||
/*
|
||||
* Create a single allocation that has space for vec of length bt->len.
|
||||
*/
|
||||
prof_gctx_t *gctx = (prof_gctx_t *)imalloc(offsetof(prof_gctx_t, vec) +
|
||||
(bt->len * sizeof(void *)));
|
||||
prof_gctx_t *gctx = (prof_gctx_t *)imalloc(tsd, offsetof(prof_gctx_t,
|
||||
vec) + (bt->len * sizeof(void *)));
|
||||
if (gctx == NULL)
|
||||
return (NULL);
|
||||
gctx->lock = prof_gctx_mutex_choose();
|
||||
@@ -518,7 +516,7 @@ prof_gctx_create(prof_bt_t *bt)
|
||||
}
|
||||
|
||||
static void
|
||||
prof_gctx_maybe_destroy(prof_gctx_t *gctx, prof_tdata_t *tdata)
|
||||
prof_gctx_maybe_destroy(tsd_t *tsd, prof_gctx_t *gctx, prof_tdata_t *tdata)
|
||||
{
|
||||
|
||||
cassert(config_prof);
|
||||
@@ -534,12 +532,12 @@ prof_gctx_maybe_destroy(prof_gctx_t *gctx, prof_tdata_t *tdata)
|
||||
malloc_mutex_lock(gctx->lock);
|
||||
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
|
||||
/* Remove gctx from bt2gctx. */
|
||||
if (ckh_remove(&bt2gctx, &gctx->bt, NULL, NULL))
|
||||
if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
|
||||
not_reached();
|
||||
prof_leave(tdata);
|
||||
/* Destroy gctx. */
|
||||
malloc_mutex_unlock(gctx->lock);
|
||||
idalloc(gctx);
|
||||
idalloc(tsd, gctx);
|
||||
} else {
|
||||
/*
|
||||
* Compensate for increment in prof_tctx_destroy() or
|
||||
@@ -580,7 +578,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
|
||||
|
||||
/* tctx->tdata->lock is held upon entry, and released before return. */
|
||||
static void
|
||||
prof_tctx_destroy(prof_tctx_t *tctx)
|
||||
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
|
||||
{
|
||||
prof_tdata_t *tdata = tctx->tdata;
|
||||
prof_gctx_t *gctx = tctx->gctx;
|
||||
@@ -592,7 +590,7 @@ prof_tctx_destroy(prof_tctx_t *tctx)
|
||||
assert(tctx->cnts.accumobjs == 0);
|
||||
assert(tctx->cnts.accumbytes == 0);
|
||||
|
||||
ckh_remove(&tdata->bt2tctx, &gctx->bt, NULL, NULL);
|
||||
ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
|
||||
destroy_tdata = prof_tdata_should_destroy(tdata);
|
||||
malloc_mutex_unlock(tdata->lock);
|
||||
|
||||
@@ -618,17 +616,17 @@ prof_tctx_destroy(prof_tctx_t *tctx)
|
||||
destroy_gctx = false;
|
||||
malloc_mutex_unlock(gctx->lock);
|
||||
if (destroy_gctx)
|
||||
prof_gctx_maybe_destroy(gctx, tdata);
|
||||
prof_gctx_maybe_destroy(tsd, gctx, tdata);
|
||||
|
||||
if (destroy_tdata)
|
||||
prof_tdata_destroy(tdata);
|
||||
prof_tdata_destroy(tsd, tdata);
|
||||
|
||||
idalloc(tctx);
|
||||
idalloc(tsd, tctx);
|
||||
}
|
||||
|
||||
static bool
|
||||
prof_lookup_global(prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey,
|
||||
prof_gctx_t **p_gctx, bool *p_new_gctx)
|
||||
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
|
||||
void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
|
||||
{
|
||||
union {
|
||||
prof_gctx_t *p;
|
||||
@@ -643,16 +641,16 @@ prof_lookup_global(prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey,
|
||||
prof_enter(tdata);
|
||||
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
|
||||
/* bt has never been seen before. Insert it. */
|
||||
gctx.p = prof_gctx_create(bt);
|
||||
gctx.p = prof_gctx_create(tsd, bt);
|
||||
if (gctx.v == NULL) {
|
||||
prof_leave(tdata);
|
||||
return (true);
|
||||
}
|
||||
btkey.p = &gctx.p->bt;
|
||||
if (ckh_insert(&bt2gctx, btkey.v, gctx.v)) {
|
||||
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
|
||||
/* OOM. */
|
||||
prof_leave(tdata);
|
||||
idalloc(gctx.v);
|
||||
idalloc(tsd, gctx.v);
|
||||
return (true);
|
||||
}
|
||||
new_gctx = true;
|
||||
@@ -675,7 +673,7 @@ prof_lookup_global(prof_bt_t *bt, prof_tdata_t *tdata, void **p_btkey,
|
||||
}
|
||||
|
||||
prof_tctx_t *
|
||||
prof_lookup(prof_bt_t *bt)
|
||||
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
|
||||
{
|
||||
union {
|
||||
prof_tctx_t *p;
|
||||
@@ -686,8 +684,8 @@ prof_lookup(prof_bt_t *bt)
|
||||
|
||||
cassert(config_prof);
|
||||
|
||||
tdata = prof_tdata_get(false);
|
||||
if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
|
||||
tdata = prof_tdata_get(tsd, false);
|
||||
if (tdata == NULL)
|
||||
return (NULL);
|
||||
|
||||
malloc_mutex_lock(tdata->lock);
|
||||
@@ -704,15 +702,15 @@ prof_lookup(prof_bt_t *bt)
|
||||
* This thread's cache lacks bt. Look for it in the global
|
||||
* cache.
|
||||
*/
|
||||
if (prof_lookup_global(bt, tdata, &btkey, &gctx,
|
||||
if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
|
||||
&new_gctx))
|
||||
return (NULL);
|
||||
|
||||
/* Link a prof_tctx_t into gctx for this thread. */
|
||||
ret.v = imalloc(sizeof(prof_tctx_t));
|
||||
ret.v = imalloc(tsd, sizeof(prof_tctx_t));
|
||||
if (ret.p == NULL) {
|
||||
if (new_gctx)
|
||||
prof_gctx_maybe_destroy(gctx, tdata);
|
||||
prof_gctx_maybe_destroy(tsd, gctx, tdata);
|
||||
return (NULL);
|
||||
}
|
||||
ret.p->tdata = tdata;
|
||||
@@ -721,12 +719,12 @@ prof_lookup(prof_bt_t *bt)
|
||||
ret.p->prepared = true;
|
||||
ret.p->state = prof_tctx_state_nominal;
|
||||
malloc_mutex_lock(tdata->lock);
|
||||
error = ckh_insert(&tdata->bt2tctx, btkey, ret.v);
|
||||
error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
|
||||
malloc_mutex_unlock(tdata->lock);
|
||||
if (error) {
|
||||
if (new_gctx)
|
||||
prof_gctx_maybe_destroy(gctx, tdata);
|
||||
idalloc(ret.v);
|
||||
prof_gctx_maybe_destroy(tsd, gctx, tdata);
|
||||
idalloc(tsd, ret.v);
|
||||
return (NULL);
|
||||
}
|
||||
malloc_mutex_lock(gctx->lock);
@@ -798,10 +796,13 @@ size_t
prof_bt_count(void)
{
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(false);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	if ((tsd = tsd_tryget()) == NULL)
		return (0);
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (0);

	prof_enter(tdata);
@@ -989,6 +990,7 @@ static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	prof_tctx_t *ret;
	tsd_t *tsd = (tsd_t *)arg;

	switch (tctx->state) {
	case prof_tctx_state_nominal:
@@ -1000,7 +1002,7 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
	case prof_tctx_state_purgatory:
		ret = tctx_tree_next(tctxs, tctx);
		tctx_tree_remove(tctxs, tctx);
		idalloc(tctx);
		idalloc(tsd, tctx);
		goto label_return;
	default:
		not_reached();
@@ -1049,7 +1051,8 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
static prof_gctx_t *
prof_gctx_finish_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
	prof_tdata_t *tdata = (prof_tdata_t *)arg;
	tsd_t *tsd = (tsd_t *)arg;
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_tctx_t *next;
	bool destroy_gctx;

@@ -1057,13 +1060,13 @@ prof_gctx_finish_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
	next = NULL;
	do {
		next = tctx_tree_iter(&gctx->tctxs, next, prof_tctx_finish_iter,
		    NULL);
		    tsd);
	} while (next != NULL);
	gctx->nlimbo--;
	destroy_gctx = prof_gctx_should_destroy(gctx);
	malloc_mutex_unlock(gctx->lock);
	if (destroy_gctx)
		prof_gctx_maybe_destroy(gctx, tdata);
		prof_gctx_maybe_destroy(tsd, gctx, tdata);

	return (NULL);
}
@@ -1277,7 +1280,7 @@ label_return:
}

static bool
prof_dump(bool propagate_err, const char *filename, bool leakcheck)
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
	prof_tdata_t *tdata;
	prof_cnt_t cnt_all;
@@ -1291,8 +1294,8 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)

	cassert(config_prof);

	tdata = prof_tdata_get(false);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (true);

	malloc_mutex_lock(&prof_dump_mtx);
@@ -1341,7 +1344,7 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
	if (prof_dump_close(propagate_err))
		goto label_open_close_error;

	gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tdata);
	gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tsd);
	malloc_mutex_unlock(&prof_dump_mtx);

	if (leakcheck)
@@ -1351,7 +1354,7 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
label_write_error:
	prof_dump_close(propagate_err);
label_open_close_error:
	gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tdata);
	gctx_tree_iter(&gctxs, NULL, prof_gctx_finish_iter, tsd);
	malloc_mutex_unlock(&prof_dump_mtx);
	return (true);
}
@@ -1381,24 +1384,28 @@ prof_dump_filename(char *filename, char v, uint64_t vseq)
static void
prof_fdump(void)
{
	tsd_t *tsd;
	char filename[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (prof_booted == false)
		return;
	if ((tsd = tsd_tryget()) == NULL)
		return;

	if (opt_prof_final && opt_prof_prefix[0] != '\0') {
		malloc_mutex_lock(&prof_dump_seq_mtx);
		prof_dump_filename(filename, 'f', VSEQ_INVALID);
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, opt_prof_leak);
		prof_dump(tsd, false, filename, opt_prof_leak);
	}
}

void
prof_idump(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;
	char filename[PATH_MAX + 1];

@@ -1406,8 +1413,10 @@ prof_idump(void)

	if (prof_booted == false)
		return;
	tdata = prof_tdata_get(false);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	if ((tsd = tsd_tryget()) == NULL)
		return;
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return;
	if (tdata->enq) {
		tdata->enq_idump = true;
@@ -1419,19 +1428,22 @@ prof_idump(void)
		prof_dump_filename(filename, 'i', prof_dump_iseq);
		prof_dump_iseq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, false);
		prof_dump(tsd, false, filename, false);
	}
}

bool
prof_mdump(const char *filename)
{
	tsd_t *tsd;
	char filename_buf[DUMP_FILENAME_BUFSIZE];

	cassert(config_prof);

	if (opt_prof == false || prof_booted == false)
		return (true);
	if ((tsd = tsd_tryget()) == NULL)
		return (true);

	if (filename == NULL) {
		/* No filename specified, so automatically generate one. */
@@ -1443,12 +1455,13 @@ prof_mdump(const char *filename)
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		filename = filename_buf;
	}
	return (prof_dump(true, filename, false));
	return (prof_dump(tsd, true, filename, false));
}

void
prof_gdump(void)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;
	char filename[DUMP_FILENAME_BUFSIZE];

@@ -1456,8 +1469,10 @@ prof_gdump(void)

	if (prof_booted == false)
		return;
	tdata = prof_tdata_get(false);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	if ((tsd = tsd_tryget()) == NULL)
		return;
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return;
	if (tdata->enq) {
		tdata->enq_gdump = true;
@@ -1469,7 +1484,7 @@ prof_gdump(void)
		prof_dump_filename(filename, 'u', prof_dump_useq);
		prof_dump_useq++;
		malloc_mutex_unlock(&prof_dump_seq_mtx);
		prof_dump(false, filename, false);
		prof_dump(tsd, false, filename, false);
	}
}
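
Every dump entry point now follows the same guard sequence: tsd_tryget(), then prof_tdata_get(tsd, ...), then a plain NULL test. The old code instead compared tdata against small sentinel "pointers"; the quarantine hunk further down shows the same trick (QUARANTINE_STATE_PURGATORY is the integer 2 cast to a pointer). A sketch of the retired encoding, reconstructed by analogy with those macros (the exact PROF_TDATA_STATE_* definitions are not part of this diff):

/* Reconstruction, not from this diff: sentinel states packed into the
 * low pointer range [0 .. PROF_TDATA_STATE_MAX]. */
#define	PROF_TDATA_STATE_REINCARNATED	((prof_tdata_t *)(uintptr_t)1)
#define	PROF_TDATA_STATE_PURGATORY	((prof_tdata_t *)(uintptr_t)2)
#define	PROF_TDATA_STATE_MAX		PROF_TDATA_STATE_PURGATORY

/* Old guard: any value <= MAX is a state code, not a real pointer. */
if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	return;
/* New guard: tdata is either NULL or a real allocation. */
if (tdata == NULL)
	return;
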
@@ -1510,14 +1525,14 @@ prof_thr_uid_alloc(void)
}

static prof_tdata_t *
prof_tdata_init_impl(uint64_t thr_uid)
prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	/* Initialize an empty cache for this thread. */
	tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
	tdata = (prof_tdata_t *)imalloc(tsd, sizeof(prof_tdata_t));
	if (tdata == NULL)
		return (NULL);

@@ -1526,9 +1541,9 @@ prof_tdata_init_impl(uint64_t thr_uid)
	tdata->thread_name = NULL;
	tdata->state = prof_tdata_state_attached;

	if (ckh_new(&tdata->bt2tctx, PROF_CKH_MINITEMS,
	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
	    prof_bt_hash, prof_bt_keycomp)) {
		idalloc(tdata);
		idalloc(tsd, tdata);
		return (NULL);
	}

@@ -1542,8 +1557,6 @@ prof_tdata_init_impl(uint64_t thr_uid)
	tdata->dumping = false;
	tdata->active = true;

	prof_tdata_tsd_set(&tdata);

	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_insert(&tdatas, tdata);
	malloc_mutex_unlock(&tdatas_mtx);
@@ -1552,17 +1565,17 @@ prof_tdata_init_impl(uint64_t thr_uid)
}

prof_tdata_t *
prof_tdata_init(void)
prof_tdata_init(tsd_t *tsd)
{

	return (prof_tdata_init_impl(prof_thr_uid_alloc()));
	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc()));
}

prof_tdata_t *
prof_tdata_reinit(prof_tdata_t *tdata)
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
{

	return (prof_tdata_init_impl(tdata->thr_uid));
	return (prof_tdata_init_impl(tsd, tdata->thr_uid));
}

/* tdata->lock must be held. */
@@ -1578,7 +1591,7 @@ prof_tdata_should_destroy(prof_tdata_t *tdata)
}

static void
prof_tdata_destroy(prof_tdata_t *tdata)
prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata)
{

	assert(prof_tdata_should_destroy(tdata));
@@ -1588,13 +1601,14 @@ prof_tdata_destroy(prof_tdata_t *tdata)
	malloc_mutex_unlock(&tdatas_mtx);

	if (tdata->thread_name != NULL)
		idalloc(tdata->thread_name);
	ckh_delete(&tdata->bt2tctx);
	idalloc(tdata);
		idalloc(tsd, tdata->thread_name);
	ckh_delete(tsd, &tdata->bt2tctx);
	idalloc(tsd, tdata);
}

static void
prof_tdata_state_transition(prof_tdata_t *tdata, prof_tdata_state_t state)
prof_tdata_state_transition(tsd_t *tsd, prof_tdata_t *tdata,
    prof_tdata_state_t state)
{
	bool destroy_tdata;

@@ -1606,33 +1620,34 @@ prof_tdata_state_transition(prof_tdata_t *tdata, prof_tdata_state_t state)
		destroy_tdata = false;
	malloc_mutex_unlock(tdata->lock);
	if (destroy_tdata)
		prof_tdata_destroy(tdata);
		prof_tdata_destroy(tsd, tdata);
}

static void
prof_tdata_detach(prof_tdata_t *tdata)
prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{

	prof_tdata_state_transition(tdata, prof_tdata_state_detached);
	prof_tdata_state_transition(tsd, tdata, prof_tdata_state_detached);
}

static void
prof_tdata_expire(prof_tdata_t *tdata)
prof_tdata_expire(tsd_t *tsd, prof_tdata_t *tdata)
{

	prof_tdata_state_transition(tdata, prof_tdata_state_expired);
	prof_tdata_state_transition(tsd, tdata, prof_tdata_state_expired);
}

static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	tsd_t *tsd = (tsd_t *)arg;

	prof_tdata_expire(tdata);
	prof_tdata_expire(tsd, tdata);
	return (NULL);
}

void
prof_reset(size_t lg_sample)
prof_reset(tsd_t *tsd, size_t lg_sample)
{

	assert(lg_sample < (sizeof(uint64_t) << 3));
@@ -1641,69 +1656,58 @@ prof_reset(size_t lg_sample)
	malloc_mutex_lock(&tdatas_mtx);

	lg_prof_sample = lg_sample;
	tdata_tree_iter(&tdatas, NULL, prof_tdata_reset_iter, NULL);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_reset_iter, tsd);

	malloc_mutex_unlock(&tdatas_mtx);
	malloc_mutex_unlock(&prof_dump_mtx);
}

void
prof_tdata_cleanup(void *arg)
prof_tdata_cleanup(tsd_t *tsd)
{
	prof_tdata_t *tdata = *(prof_tdata_t **)arg;
	prof_tdata_t *tdata;

	cassert(config_prof);
	if (!config_prof)
		return;

	if (tdata == PROF_TDATA_STATE_REINCARNATED) {
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset tdata to PROF_TDATA_STATE_PURGATORY in
		 * order to receive another callback.
		 */
		tdata = PROF_TDATA_STATE_PURGATORY;
		prof_tdata_tsd_set(&tdata);
	} else if (tdata == PROF_TDATA_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to PROF_TDATA_STATE_PURGATORY so that other destructors
		 * wouldn't cause re-creation of the tdata.  This time, do
		 * nothing, so that the destructor will not be called again.
		 */
	} else if (tdata != NULL) {
		prof_tdata_detach(tdata);
		tdata = PROF_TDATA_STATE_PURGATORY;
		prof_tdata_tsd_set(&tdata);
	}
	tdata = tsd_prof_tdata_get(tsd);
	if (tdata != NULL)
		prof_tdata_detach(tsd, tdata);
}
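
The cleanup shrinks to a straight field read because the reincarnation/purgatory bookkeeping now lives once in tsd_cleanup() (see src/tsd.c below) rather than being duplicated in every subsystem. The tsd_prof_tdata_get()/tsd_prof_tdata_set() calls used here are presumably per-field accessors generated from the tsd structure; a plausible shape, not taken from this diff:

/* Sketch of the generated per-field accessors; the names match the
 * tsd_prof_tdata_get()/tsd_quarantine_get()/tsd_tcache_get() calls in
 * this commit, but the bodies here are illustrative only. */
JEMALLOC_INLINE prof_tdata_t *
tsd_prof_tdata_get(tsd_t *tsd)
{

	return (tsd->prof_tdata);
}

JEMALLOC_INLINE void
tsd_prof_tdata_set(tsd_t *tsd, prof_tdata_t *prof_tdata)
{

	tsd->prof_tdata = prof_tdata;
}
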
const char *
prof_thread_name_get(void)
{
	prof_tdata_t *tdata = prof_tdata_get(true);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	tsd_t *tsd;
	prof_tdata_t *tdata;

	if ((tsd = tsd_tryget()) == NULL)
		return (NULL);
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (NULL);
	return (tdata->thread_name);
}

bool
prof_thread_name_set(const char *thread_name)
prof_thread_name_set(tsd_t *tsd, const char *thread_name)
{
	prof_tdata_t *tdata;
	size_t size;
	char *s;

	tdata = prof_tdata_get(true);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (true);

	size = strlen(thread_name) + 1;
	s = imalloc(size);
	s = imalloc(tsd, size);
	if (s == NULL)
		return (true);

	memcpy(s, thread_name, size);
	if (tdata->thread_name != NULL)
		idalloc(tdata->thread_name);
		idalloc(tsd, tdata->thread_name);
	tdata->thread_name = s;
	return (false);
}
@@ -1711,8 +1715,13 @@ prof_thread_name_set(const char *thread_name)
bool
prof_thread_active_get(void)
{
	prof_tdata_t *tdata = prof_tdata_get(true);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	tsd_t *tsd;
	prof_tdata_t *tdata;

	if ((tsd = tsd_tryget()) == NULL)
		return (false);
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (false);
	return (tdata->active);
}
@@ -1720,10 +1729,13 @@ prof_thread_active_get(void)
bool
prof_thread_active_set(bool active)
{
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tdata = prof_tdata_get(true);
	if ((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
	if ((tsd = tsd_tryget()) == NULL)
		return (true);
	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (true);
	tdata->active = active;
	return (false);
@@ -1772,20 +1784,18 @@ prof_boot2(void)
	cassert(config_prof);

	if (opt_prof) {
		tsd_t *tsd;
		unsigned i;

		lg_prof_sample = opt_lg_prof_sample;

		if (ckh_new(&bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		if ((tsd = tsd_tryget()) == NULL)
			return (true);
		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
		    prof_bt_keycomp))
			return (true);
		if (malloc_mutex_init(&bt2gctx_mtx))
			return (true);
		if (prof_tdata_tsd_boot()) {
			malloc_write(
			    "<jemalloc>: Error in pthread_key_create()\n");
			abort();
		}

		tdata_tree_new(&tdatas);
		if (malloc_mutex_init(&tdatas_mtx))
src/quarantine.c
@@ -9,26 +9,22 @@
#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY

/******************************************************************************/
/* Data. */

malloc_tsd_data(, quarantine, quarantine_t *, NULL)

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain_one(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
    size_t upper_bound);

/******************************************************************************/

quarantine_t *
quarantine_init(size_t lg_maxobjs)
quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
{
	quarantine_t *quarantine;

	quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
	quarantine = (quarantine_t *)imalloc(tsd, offsetof(quarantine_t, objs) +
	    ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
	if (quarantine == NULL)
		return (NULL);
@@ -37,19 +33,17 @@ quarantine_init(size_t lg_maxobjs)
	quarantine->first = 0;
	quarantine->lg_maxobjs = lg_maxobjs;

	quarantine_tsd_set(&quarantine);

	return (quarantine);
}

static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
	quarantine_t *ret;

	ret = quarantine_init(quarantine->lg_maxobjs + 1);
	ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
	if (ret == NULL) {
		quarantine_drain_one(quarantine);
		quarantine_drain_one(tsd, quarantine);
		return (quarantine);
	}

@@ -71,17 +65,17 @@ quarantine_grow(quarantine_t *quarantine)
		memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
		    sizeof(quarantine_obj_t));
	}
	idalloc(quarantine);
	idalloc(tsd, quarantine);

	return (ret);
}

static void
quarantine_drain_one(quarantine_t *quarantine)
quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
{
	quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
	assert(obj->usize == isalloc(obj->ptr, config_prof));
	idalloc(obj->ptr);
	idalloc(tsd, obj->ptr);
	quarantine->curbytes -= obj->usize;
	quarantine->curobjs--;
	quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -89,15 +83,15 @@ quarantine_drain_one(quarantine_t *quarantine)
}

static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
{

	while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
		quarantine_drain_one(quarantine);
		quarantine_drain_one(tsd, quarantine);
}

void
quarantine(void *ptr)
quarantine(tsd_t *tsd, void *ptr)
{
	quarantine_t *quarantine;
	size_t usize = isalloc(ptr, config_prof);
@@ -105,17 +99,8 @@ quarantine(void *ptr)
	cassert(config_fill);
	assert(opt_quarantine);

	quarantine = *quarantine_tsd_get();
	if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
		if (quarantine == QUARANTINE_STATE_PURGATORY) {
			/*
			 * Make a note that quarantine() was called after
			 * quarantine_cleanup() was called.
			 */
			quarantine = QUARANTINE_STATE_REINCARNATED;
			quarantine_tsd_set(&quarantine);
		}
		idalloc(ptr);
	if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
		idalloc(tsd, ptr);
		return;
	}
	/*
@@ -125,11 +110,11 @@ quarantine(void *ptr)
	if (quarantine->curbytes + usize > opt_quarantine) {
		size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
		    - usize : 0;
		quarantine_drain(quarantine, upper_bound);
		quarantine_drain(tsd, quarantine, upper_bound);
	}
	/* Grow the quarantine ring buffer if it's full. */
	if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
		quarantine = quarantine_grow(quarantine);
		quarantine = quarantine_grow(tsd, quarantine);
	/* quarantine_grow() must free a slot if it fails to grow. */
	assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
	/* Append ptr if its size doesn't exceed the quarantine size. */
@@ -154,46 +139,22 @@ quarantine(void *ptr)
		}
	} else {
		assert(quarantine->curbytes == 0);
		idalloc(ptr);
		idalloc(tsd, ptr);
	}
}

void
quarantine_cleanup(void *arg)
quarantine_cleanup(tsd_t *tsd)
{
	quarantine_t *quarantine = *(quarantine_t **)arg;
	quarantine_t *quarantine;

	if (quarantine == QUARANTINE_STATE_REINCARNATED) {
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset quarantine to QUARANTINE_STATE_PURGATORY
		 * in order to receive another callback.
		 */
		quarantine = QUARANTINE_STATE_PURGATORY;
		quarantine_tsd_set(&quarantine);
	} else if (quarantine == QUARANTINE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to QUARANTINE_STATE_PURGATORY so that other destructors
		 * wouldn't cause re-creation of the quarantine.  This time, do
		 * nothing, so that the destructor will not be called again.
		 */
	} else if (quarantine != NULL) {
		quarantine_drain(quarantine, 0);
		idalloc(quarantine);
		quarantine = QUARANTINE_STATE_PURGATORY;
		quarantine_tsd_set(&quarantine);
	if (!config_fill)
		return;

	quarantine = tsd_quarantine_get(tsd);
	if (quarantine != NULL) {
		quarantine_drain(tsd, quarantine, 0);
		idalloc(tsd, quarantine);
		tsd_quarantine_set(tsd, NULL);
	}
}

bool
quarantine_boot(void)
{

	cassert(config_fill);

	if (quarantine_tsd_boot())
		return (true);

	return (false);
}
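
One detail worth calling out in quarantine_drain_one(): the quarantine is a power-of-two ring buffer, so advancing the head is a mask rather than a modulo. The mask is (ZU(1) << lg_maxobjs) - 1, continuing the wrap expression that the hunk boundary above truncates. A worked example, independent of this diff:

/* With lg_maxobjs == 2 the ring holds 4 slots and the mask is 0x3, so
 * first advances 0 -> 1 -> 2 -> 3 -> 0 without a division. */
size_t lg_maxobjs = 2;
size_t mask = (ZU(1) << lg_maxobjs) - 1;	/* 0x3 */
size_t first = 3;
first = (first + 1) & mask;			/* wraps to 0 */
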
src/rtree.c
@@ -9,8 +9,10 @@ rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
	assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));

	bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
	bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
	bits_per_level = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void
	    *)))) - 1;
	bits_in_leaf = jemalloc_ffs(pow2_ceil((RTREE_NODESIZE /
	    sizeof(uint8_t)))) - 1;
	if (bits > bits_in_leaf) {
		height = 1 + (bits - bits_in_leaf) / bits_per_level;
		if ((height-1) * bits_per_level + bits_in_leaf != bits)
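
(This rtree hunk only rewraps the two long lines; the computation is unchanged. The ffs/pow2_ceil pairing computes how many key bits each tree level resolves: pow2_ceil rounds the per-node slot count up to a power of two, and jemalloc_ffs, being 1-based like POSIX ffs, turns that power of two into its log2 after the -1. A worked example, assuming RTREE_NODESIZE == 64 on an LP64 system — the actual constant is defined elsewhere in jemalloc:)

/*
 *   bits_per_level = ffs(pow2_ceil(64 / sizeof(void *))) - 1
 *                  = ffs(pow2_ceil(8)) - 1 = ffs(8) - 1 = 4 - 1 = 3
 *   bits_in_leaf   = ffs(pow2_ceil(64 / sizeof(uint8_t))) - 1
 *                  = ffs(64) - 1 = 7 - 1 = 6
 *
 * i.e. interior nodes resolve 3 key bits (8 children) and leaves
 * resolve 6 key bits, so height = 1 + (bits - bits_in_leaf) /
 * bits_per_level, with the check above bumping the height when the
 * integer division truncates.
 */
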
101
src/tcache.c
@@ -4,9 +4,6 @@
/******************************************************************************/
/* Data. */

malloc_tsd_data(, tcache, tcache_t *, NULL)
malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

@@ -262,43 +259,14 @@ tcache_arena_dissociate(tcache_t *tcache)
}

tcache_t *
tcache_get_hard(tcache_t *tcache, bool create)
tcache_get_hard(tsd_t *tsd)
{

	if (tcache == NULL) {
		if (create == false) {
			/*
			 * Creating a tcache here would cause
			 * allocation as a side effect of free().
			 * Ordinarily that would be okay since
			 * tcache_create() failure is a soft failure
			 * that doesn't propagate.  However, if TLS
			 * data are freed via free() as in glibc,
			 * subtle corruption could result from setting
			 * a TLS variable after its backing memory is
			 * freed.
			 */
			return (NULL);
		}
		if (tcache_enabled_get() == false) {
			tcache_enabled_set(false); /* Memoize. */
			return (NULL);
		}
		return (tcache_create(choose_arena(NULL)));
	}
	if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * Make a note that an allocator function was called
		 * after tcache_thread_cleanup() was called.
		 */
		tcache = TCACHE_STATE_REINCARNATED;
		tcache_tsd_set(&tcache);
	if (tcache_enabled_get() == false) {
		tcache_enabled_set(false); /* Memoize. */
		return (NULL);
	}
	if (tcache == TCACHE_STATE_REINCARNATED)
		return (NULL);
	not_reached();
	return (NULL);
	return (tcache_create(choose_arena(tsd, NULL)));
}
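
tcache_get_hard() loses its sentinel gymnastics: the caller passes tsd explicitly, and the reincarnation cases are handled centrally in tsd_cleanup(). A sketch of how a fast-path getter plausibly wraps it after this change — the wrapper shown here is illustrative, not part of this diff:

/* Illustrative only: a fast-path getter layered over the new
 * tcache_get_hard(tsd), using the tsd_tcache_get() accessor that
 * appears in tcache_cleanup() below. */
JEMALLOC_INLINE tcache_t *
example_tcache_get(tsd_t *tsd)
{
	tcache_t *tcache;

	tcache = tsd_tcache_get(tsd);
	if (tcache == NULL)
		tcache = tcache_get_hard(tsd);
	return (tcache);
}
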

tcache_t *
@@ -328,7 +296,7 @@ tcache_create(arena_t *arena)
	else if (size <= tcache_maxclass)
		tcache = (tcache_t *)arena_malloc_large(arena, size, true);
	else
		tcache = (tcache_t *)icalloct(size, false, arena);
		tcache = (tcache_t *)icalloct(NULL, size, false, arena);

	if (tcache == NULL)
		return (NULL);
@@ -343,13 +311,11 @@ tcache_create(arena_t *arena)
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	tcache_tsd_set(&tcache);

	return (tcache);
}

void
tcache_destroy(tcache_t *tcache)
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
	unsigned i;
	size_t tcache_size;
@@ -403,39 +369,30 @@ tcache_destroy(tcache_t *tcache)

		arena_dalloc_large(arena, chunk, tcache);
	} else
		idalloct(tcache, false);
		idalloct(tsd, tcache, false);
}

void
tcache_thread_cleanup(void *arg)
tcache_cleanup(tsd_t *tsd)
{
	tcache_t *tcache = *(tcache_t **)arg;
	tcache_t *tcache;

	if (tcache == TCACHE_STATE_DISABLED) {
		/* Do nothing. */
	} else if (tcache == TCACHE_STATE_REINCARNATED) {
		/*
		 * Another destructor called an allocator function after this
		 * destructor was called.  Reset tcache to
		 * TCACHE_STATE_PURGATORY in order to receive another callback.
		 */
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	} else if (tcache == TCACHE_STATE_PURGATORY) {
		/*
		 * The previous time this destructor was called, we set the key
		 * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
		 * cause re-creation of the tcache.  This time, do nothing, so
		 * that the destructor will not be called again.
		 */
	} else if (tcache != NULL) {
		assert(tcache != TCACHE_STATE_PURGATORY);
		tcache_destroy(tcache);
		tcache = TCACHE_STATE_PURGATORY;
		tcache_tsd_set(&tcache);
	if (!config_tcache)
		return;

	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
		tcache_destroy(tsd, tcache);
		tsd_tcache_set(tsd, NULL);
	}
}

void
tcache_enabled_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
@@ -464,7 +421,7 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}

bool
tcache_boot0(void)
tcache_boot(void)
{
	unsigned i;

@@ -504,13 +461,3 @@ tcache_boot0(void)

	return (false);
}

bool
tcache_boot1(void)
{

	if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
		return (true);

	return (false);
}

51
src/tsd.c
@@ -7,6 +7,8 @@
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];

malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)

/******************************************************************************/

void *
@@ -14,14 +16,15 @@ malloc_tsd_malloc(size_t size)
{

	/* Avoid choose_arena() in order to dodge bootstrapping issues. */
	return (arena_malloc(arenas[0], size, false, false));
	return (arena_malloc(NULL, arenas[0], CACHELINE_CEILING(size), false,
	    false));
}

void
malloc_tsd_dalloc(void *wrapper)
{

	idalloct(wrapper, false);
	idalloct(NULL, wrapper, false);
}

void
@@ -67,10 +70,54 @@ malloc_tsd_cleanup_register(bool (*f)(void))
}

void
tsd_cleanup(void *arg)
{
	tsd_t *tsd = (tsd_t *)arg;

	if (tsd == NULL) {
		/* OOM during re-initialization. */
		return;
	}

	switch (tsd->state) {
	case tsd_state_nominal:
#define O(n, t) \
		n##_cleanup(tsd);
MALLOC_TSD
#undef O
		tsd->state = tsd_state_purgatory;
		tsd_set(tsd);
		break;
	case tsd_state_purgatory:
		/*
		 * The previous time this destructor was called, we set the
		 * state to tsd_state_purgatory so that other destructors
		 * wouldn't cause re-creation of the tsd.  This time, do
		 * nothing, and do not request another callback.
		 */
		break;
	case tsd_state_reincarnated:
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset state to tsd_state_purgatory and request
		 * another callback.
		 */
		tsd->state = tsd_state_purgatory;
		tsd_set(tsd);
		break;
	default:
		not_reached();
	}
}
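
The O() X-macro is where the consolidation pays off: MALLOC_TSD is now the single list of per-thread fields, and instantiating O() once per field calls every subsystem's cleanup in order, so the purgatory/reincarnation dance above runs exactly once for the whole structure. Assuming the field list includes tcache, quarantine, and prof_tdata (all three *_cleanup functions appear in this commit), the tsd_state_nominal arm expands to roughly:

/* Approximate preprocessor expansion of the tsd_state_nominal arm,
 * assuming MALLOC_TSD lists (among others) the tcache, quarantine,
 * and prof_tdata fields: */
tcache_cleanup(tsd);
quarantine_cleanup(tsd);
prof_tdata_cleanup(tsd);
tsd->state = tsd_state_purgatory;
tsd_set(tsd);
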

bool
malloc_tsd_boot(void)
{

	ncleanups = 0;
	if (tsd_boot())
		return (true);
	return (false);
}

#ifdef _WIN32