Fix tsd cleanup regressions.

Fix tsd cleanup regressions that were introduced in commit 5460aa6f66
("Convert all tsd variables to reside in a single tsd structure.").
These regressions were twofold:

1) tsd_tryget() should never (and need never) return NULL.  Rename it to
   tsd_fetch() and simplify all callers.
2) tsd_*_set() must only be called when tsd is in the nominal state,
   because cleanup happens during the nominal-->purgatory transition,
   and re-initialization must not happen while in the purgatory state.
   Add tsd_nominal() and use it as needed.  Note that tsd_*{p,}_get()
   can still be used as long as no re-initialization that would require
   cleanup occurs.  This means that e.g. the thread_allocated counter
   can be updated unconditionally.

This commit is contained in:
Jason Evans 2014-10-04 11:12:53 -07:00
parent a4a972d9a1
commit 029d44cf8b
12 changed files with 137 additions and 147 deletions

View File

@ -390,12 +390,14 @@ tsd_arena_set
tsd_boot
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_get
tsd_get_wrapper
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
tsd_nominal
tsd_quarantine_get
tsd_quarantine_set
tsd_set
@ -411,7 +413,6 @@ tsd_thread_allocated_get
tsd_thread_allocated_set
tsd_thread_deallocated_get
tsd_thread_deallocated_set
tsd_tryget
u2rz
valgrind_freelike_block
valgrind_make_mem_defined

View File

@ -331,8 +331,10 @@ prof_tdata_get(tsd_t *tsd, bool create)
tdata = tsd_prof_tdata_get(tsd);
if (create) {
if (unlikely(tdata == NULL)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
if (tsd_nominal(tsd)) {
tdata = prof_tdata_init(tsd);
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
tdata = prof_tdata_reinit(tsd, tdata);
tsd_prof_tdata_set(tsd, tdata);

View File

@ -49,8 +49,8 @@ quarantine_alloc_hook(void)
assert(config_fill && opt_quarantine);
tsd = tsd_tryget();
if (tsd != NULL && tsd_quarantine_get(tsd) == NULL)
tsd = tsd_fetch();
if (tsd_quarantine_get(tsd) == NULL && tsd_nominal(tsd))
tsd_quarantine_set(tsd, quarantine_init(tsd, LG_MAXOBJS_INIT));
}
#endif

View File

@ -142,9 +142,8 @@ tcache_flush(void)
cassert(config_tcache);
tsd = tsd_tryget();
if (tsd != NULL)
tcache_cleanup(tsd);
tsd = tsd_fetch();
tcache_cleanup(tsd);
}
JEMALLOC_INLINE bool
@ -155,9 +154,7 @@ tcache_enabled_get(void)
cassert(config_tcache);
tsd = tsd_tryget();
if (tsd == NULL)
return (false);
tsd = tsd_fetch();
tcache_enabled = tsd_tcache_enabled_get(tsd);
if (tcache_enabled == tcache_enabled_default) {
tcache_enabled = (tcache_enabled_t)opt_tcache;
@ -175,9 +172,7 @@ tcache_enabled_set(bool enabled)
cassert(config_tcache);
tsd = tsd_tryget();
if (tsd == NULL)
return;
tsd = tsd_fetch();
tcache_enabled = (tcache_enabled_t)enabled;
tsd_tcache_enabled_set(tsd, tcache_enabled);
@ -195,17 +190,11 @@ tcache_get(tsd_t *tsd, bool create)
return (NULL);
if (config_lazy_lock && !isthreaded)
return (NULL);
/*
* If create is true, the caller has already assured that tsd is
* non-NULL.
*/
if (!create && unlikely(tsd == NULL))
return (NULL);
tcache = tsd_tcache_get(tsd);
if (!create)
return (tcache);
if (unlikely(tcache == NULL)) {
if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
tcache = tcache_get_hard(tsd);
tsd_tcache_set(tsd, tcache);
}

View File

@ -49,16 +49,19 @@ typedef enum {
* Note that all of the functions deal in terms of (a_type *) rather than
* (a_type) so that it is possible to support non-pointer types (unlike
* pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is
* cast to (void *). This means that the cleanup function needs to cast *and*
* dereference the function argument, e.g.:
* cast to (void *). This means that the cleanup function needs to cast the
* function argument to (a_type *), then dereference the resulting pointer to
* access fields, e.g.
*
* bool
* void
* example_tsd_cleanup(void *arg)
* {
* example_t *example = *(example_t **)arg;
* example_t *example = (example_t *)arg;
*
* example->x = 42;
* [...]
* return ([want the cleanup function to be called again]);
* if ([want the cleanup function to be called again])
* example_tsd_set(example);
* }
*
* If example_tsd_set() is called within example_tsd_cleanup(), it will be
@ -468,7 +471,8 @@ void tsd_cleanup(void *arg);
#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_tryget(void);
tsd_t *tsd_fetch(void);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \
@ -481,50 +485,53 @@ MALLOC_TSD
malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)
JEMALLOC_INLINE tsd_t *
tsd_tryget(void)
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
tsd_t *tsd;
tsd_t *tsd = tsd_get();
tsd = tsd_get();
if (unlikely(tsd == NULL))
return (NULL);
if (likely(tsd->state == tsd_state_nominal))
return (tsd);
else if (tsd->state == tsd_state_uninitialized) {
tsd->state = tsd_state_nominal;
tsd_set(tsd);
return (tsd);
} else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
return (NULL);
} else {
assert(tsd->state == tsd_state_reincarnated);
return (NULL);
if (unlikely(tsd->state != tsd_state_nominal)) {
if (tsd->state == tsd_state_uninitialized) {
tsd->state = tsd_state_nominal;
/* Trigger cleanup handler registration. */
tsd_set(tsd);
} else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated;
tsd_set(tsd);
} else
assert(tsd->state == tsd_state_reincarnated);
}
return (tsd);
}
/*
 * Return true iff tsd is in the nominal (fully initialized, not being torn
 * down) state.  Per this commit's contract, tsd_*_set() may only be called
 * when this returns true, because cleanup runs on the nominal-->purgatory
 * transition and re-initialization must not occur in purgatory; callers
 * guard their tsd_*_set() calls with this predicate.
 */
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
return (tsd->state == tsd_state_nominal);
}
#define O(n, t) \
JEMALLOC_INLINE t * \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
\
return (&tsd->n); \
} \
\
JEMALLOC_INLINE t \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
\
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_INLINE void \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
\
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
}
MALLOC_TSD

View File

@ -571,9 +571,7 @@ ctl_grow(void)
ctl_arena_stats_t *astats;
arena_t **tarenas;
tsd = tsd_tryget();
if (tsd == NULL)
return (true);
tsd = tsd_fetch();
/* Allocate extended arena stats and arenas arrays. */
astats = (ctl_arena_stats_t *)imalloc(tsd, (ctl_stats.narenas + 2) *
@ -1132,11 +1130,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (!(c)) \
return (ENOENT); \
READONLY(); \
tsd = tsd_tryget(); \
if (tsd == NULL) { \
ret = EAGAIN; \
goto label_return; \
} \
tsd = tsd_fetch(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
@ -1239,9 +1233,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
tsd_t *tsd;
unsigned newind, oldind;
tsd = tsd_tryget();
if (tsd == NULL)
return (EAGAIN);
tsd = tsd_fetch();
malloc_mutex_lock(&ctl_mtx);
newind = oldind = choose_arena(tsd, NULL)->ind;
@ -1359,11 +1351,7 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
goto label_return;
}
tsd = tsd_tryget();
if (tsd == NULL) {
ret = EAGAIN;
goto label_return;
}
tsd = tsd_fetch();
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0)
@ -1763,11 +1751,7 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1;
tsd = tsd_tryget();
if (tsd == NULL) {
ret = EAGAIN;
goto label_return;
}
tsd = tsd_fetch();
prof_reset(tsd, lg_sample);

View File

@ -194,7 +194,8 @@ choose_arena_hard(tsd_t *tsd)
malloc_mutex_unlock(&arenas_lock);
}
tsd_arena_set(tsd, ret);
if (tsd_nominal(tsd))
tsd_arena_set(tsd, ret);
return (ret);
}
@ -908,8 +909,9 @@ JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
{
if (unlikely(malloc_init()) || unlikely((*tsd = tsd_tryget()) == NULL))
if (unlikely(malloc_init()))
return (NULL);
*tsd = tsd_fetch();
if (config_prof && opt_prof) {
*usize = s2u(size);
@ -1000,10 +1002,11 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
assert(min_alignment != 0);
if (unlikely(malloc_init()) || unlikely((tsd = tsd_tryget()) == NULL)) {
if (unlikely(malloc_init())) {
result = NULL;
goto label_oom;
} else {
tsd = tsd_fetch();
if (size == 0)
size = 1;
@ -1124,11 +1127,12 @@ je_calloc(size_t num, size_t size)
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(malloc_init()) || unlikely((tsd = tsd_tryget()) == NULL)) {
if (unlikely(malloc_init())) {
num_size = 0;
ret = NULL;
goto label_return;
}
tsd = tsd_fetch();
num_size = num * size;
if (unlikely(num_size == 0)) {
@ -1228,7 +1232,7 @@ ifree(tsd_t *tsd, void *ptr, bool try_tcache)
prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
if (config_stats && likely(tsd != NULL))
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(ptr);
@ -1246,7 +1250,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, bool try_tcache)
if (config_prof && opt_prof)
prof_free(tsd, ptr, usize);
if (config_stats && likely(tsd != NULL))
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
rzsize = p2rz(ptr);
@ -1267,7 +1271,7 @@ je_realloc(void *ptr, size_t size)
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_tryget();
tsd = tsd_fetch();
ifree(tsd, ptr, true);
return (NULL);
}
@ -1277,27 +1281,23 @@ je_realloc(void *ptr, size_t size)
if (likely(ptr != NULL)) {
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
if ((tsd = tsd_tryget()) != NULL) {
if ((config_prof && opt_prof) || config_stats ||
(config_valgrind && unlikely(in_valgrind)))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind)) {
old_rzsize = config_prof ? p2rz(ptr) :
u2rz(old_usize);
}
if ((config_prof && opt_prof) || config_stats ||
(config_valgrind && unlikely(in_valgrind)))
old_usize = isalloc(ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
if (config_prof && opt_prof) {
if (config_prof && opt_prof) {
usize = s2u(size);
ret = irealloc_prof(tsd, ptr, old_usize, usize);
} else {
if (config_stats || (config_valgrind &&
unlikely(in_valgrind)))
usize = s2u(size);
ret = irealloc_prof(tsd, ptr, old_usize, usize);
} else {
if (config_stats || (config_valgrind &&
unlikely(in_valgrind)))
usize = s2u(size);
ret = iralloc(tsd, ptr, size, 0, false);
}
} else
ret = NULL;
ret = iralloc(tsd, ptr, size, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
ret = imalloc_body(size, &tsd, &usize);
@ -1313,10 +1313,8 @@ je_realloc(void *ptr, size_t size)
}
if (config_stats && likely(ret != NULL)) {
assert(usize == isalloc(ret, config_prof));
if (tsd != NULL) {
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
@ -1330,7 +1328,7 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL))
ifree(tsd_tryget(), ptr, true);
ifree(tsd_fetch(), ptr, true);
}
/*
@ -1543,8 +1541,9 @@ je_mallocx(size_t size, int flags)
assert(size != 0);
if (unlikely(malloc_init()) || unlikely((tsd = tsd_tryget()) == NULL))
if (unlikely(malloc_init()))
goto label_oom;
tsd = tsd_fetch();
if (config_prof && opt_prof)
p = imallocx_prof(tsd, size, flags, &usize);
@ -1554,10 +1553,8 @@ je_mallocx(size_t size, int flags)
goto label_oom;
if (config_stats) {
tsd_t *tsd = tsd_tryget();
assert(usize == isalloc(p, config_prof));
if (tsd != NULL)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, p);
JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
@ -1649,9 +1646,7 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(size != 0);
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (unlikely((tsd = tsd_tryget()) == NULL))
goto label_oom;
tsd = tsd_fetch();
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
@ -1794,6 +1789,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
@ -1802,10 +1798,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
arena = NULL;
old_usize = isalloc(ptr, config_prof);
if (unlikely((tsd = tsd_tryget()) == NULL)) {
usize = old_usize;
goto label_not_resized;
}
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
@ -1865,7 +1857,7 @@ je_dallocx(void *ptr, int flags)
try_tcache = true;
UTRACE(ptr, 0, 0);
ifree(tsd_tryget(), ptr, try_tcache);
ifree(tsd_fetch(), ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE_C size_t
@ -1901,7 +1893,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
try_tcache = true;
UTRACE(ptr, 0, 0);
isfree(tsd_tryget(), ptr, usize, try_tcache);
isfree(tsd_fetch(), ptr, usize, try_tcache);
}
size_t

View File

@ -850,8 +850,7 @@ prof_bt_count(void)
tsd_t *tsd;
prof_tdata_t *tdata;
if ((tsd = tsd_tryget()) == NULL)
return (0);
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return (0);
@ -1475,8 +1474,7 @@ prof_fdump(void)
if (!prof_booted)
return;
if ((tsd = tsd_tryget()) == NULL)
return;
tsd = tsd_fetch();
if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
@ -1497,8 +1495,7 @@ prof_idump(void)
if (!prof_booted)
return;
if ((tsd = tsd_tryget()) == NULL)
return;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@ -1526,8 +1523,7 @@ prof_mdump(const char *filename)
if (!opt_prof || !prof_booted)
return (true);
if ((tsd = tsd_tryget()) == NULL)
return (true);
tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
@ -1553,8 +1549,7 @@ prof_gdump(void)
if (!prof_booted)
return;
if ((tsd = tsd_tryget()) == NULL)
return;
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@ -1677,6 +1672,7 @@ prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata)
{
assert(prof_tdata_should_destroy(tdata));
assert(tsd_prof_tdata_get(tsd) != tdata);
tdata_tree_remove(&tdatas, tdata);
@ -1704,6 +1700,7 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
if (tdata->attached) {
tdata->attached = false;
destroy_tdata = prof_tdata_should_destroy(tdata);
tsd_prof_tdata_set(tsd, NULL);
} else
destroy_tdata = false;
malloc_mutex_unlock(tdata->lock);
@ -1819,8 +1816,7 @@ prof_thread_name_get(void)
tsd_t *tsd;
prof_tdata_t *tdata;
if ((tsd = tsd_tryget()) == NULL)
return ("");
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return ("");
@ -1886,8 +1882,7 @@ prof_thread_active_get(void)
tsd_t *tsd;
prof_tdata_t *tdata;
if ((tsd = tsd_tryget()) == NULL)
return (false);
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (false);
@ -1900,8 +1895,7 @@ prof_thread_active_set(bool active)
tsd_t *tsd;
prof_tdata_t *tdata;
if ((tsd = tsd_tryget()) == NULL)
return (true);
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
@ -1988,8 +1982,7 @@ prof_boot2(void)
if (malloc_mutex_init(&prof_thread_active_init_mtx))
return (true);
if ((tsd = tsd_tryget()) == NULL)
return (true);
tsd = tsd_fetch();
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);

View File

@ -263,7 +263,8 @@ tcache_get_hard(tsd_t *tsd)
{
if (!tcache_enabled_get()) {
tcache_enabled_set(false); /* Memoize. */
if (tsd_nominal(tsd))
tcache_enabled_set(false); /* Memoize. */
return (NULL);
}
return (tcache_create(choose_arena(tsd, NULL)));

View File

@ -74,11 +74,6 @@ tsd_cleanup(void *arg)
{
tsd_t *tsd = (tsd_t *)arg;
if (tsd == NULL) {
/* OOM during re-initialization. */
return;
}
switch (tsd->state) {
case tsd_state_nominal:
#define O(n, t) \

View File

@ -5,8 +5,7 @@ TEST_BEGIN(test_new_delete)
tsd_t *tsd;
ckh_t ckh;
tsd = tsd_tryget();
assert_ptr_not_null(tsd, "Unexpected tsd failure");
tsd = tsd_fetch();
assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
@ -31,8 +30,7 @@ TEST_BEGIN(test_count_insert_search_remove)
const char *missing = "A string not in the hash table.";
size_t i;
tsd = tsd_tryget();
assert_ptr_not_null(tsd, "Unexpected tsd failure");
tsd = tsd_fetch();
assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp),
"Unexpected ckh_new() error");
@ -116,8 +114,7 @@ TEST_BEGIN(test_insert_iter_remove)
void *q, *r;
size_t i;
tsd = tsd_tryget();
assert_ptr_not_null(tsd, "Unexpected tsd failure");
tsd = tsd_fetch();
assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash,
ckh_pointer_keycomp), "Unexpected ckh_new() error");

View File

@ -6,17 +6,46 @@ typedef unsigned int data_t;
static bool data_cleanup_executed;
malloc_tsd_protos(, data_, data_t)
void
data_cleanup(void *arg)
{
data_t *data = (data_t *)arg;
assert_x_eq(*data, THREAD_DATA,
"Argument passed into cleanup function should match tsd value");
if (!data_cleanup_executed) {
assert_x_eq(*data, THREAD_DATA,
"Argument passed into cleanup function should match tsd "
"value");
}
data_cleanup_executed = true;
/*
* Allocate during cleanup for two rounds, in order to assure that
* jemalloc's internal tsd reinitialization happens.
*/
switch (*data) {
case THREAD_DATA:
*data = 1;
data_tsd_set(data);
break;
case 1:
*data = 2;
data_tsd_set(data);
break;
case 2:
return;
default:
not_reached();
}
{
void *p = mallocx(1, 0);
assert_ptr_not_null(p, "Unexpeced mallocx() failure");
dallocx(p, 0);
}
}
malloc_tsd_protos(, data_, data_t)
malloc_tsd_externs(data_, data_t)
#define DATA_INIT 0x12345678
malloc_tsd_data(, data_, data_t, DATA_INIT)