Add basic reentrancy-checking support, and allow arena_new to reenter.

This checks whether or not we're reentrant using thread-local data, and, if we
are, moves certain internal allocations to use arena 0 (which should be properly
initialized after bootstrapping).

The immediate benefit is that arena_new can now spin up threads, which paves the
way for creating background threads there.
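
In outline, each allocator entry point bumps a thread-local level on entry and restores it on exit; any allocation made while the level is above 1 is reentrant, so it skips the tcache and is served from arena 0. Below is a minimal sketch of that guard with hypothetical names (alloc_entry_point, alloc_impl); the real logic lives in imalloc_body and arena_choose_impl in the diff that follows.

/*
 * Simplified sketch of the reentrancy guard (illustrative only).
 */
#include <stdbool.h>
#include <stdlib.h>

static __thread int reentrancy_level;	/* per-thread, starts at 0 */

/* Stand-in for the real arena-/tcache-aware allocation path. */
static void *
alloc_impl(size_t size, bool use_arena0, bool use_tcache) {
	(void)use_arena0;
	(void)use_tcache;
	return malloc(size);
}

void *
alloc_entry_point(size_t size) {
	++reentrancy_level;
	/*
	 * Level > 1 means we were called from inside the allocator (e.g.
	 * from a hook), so stick to state that is guaranteed to be
	 * initialized: arena 0, and no thread cache.
	 */
	bool reentrant = (reentrancy_level > 1);
	void *ret = alloc_impl(size, /* use_arena0 */ reentrant,
	    /* use_tcache */ !reentrant);
	--reentrancy_level;
	return ret;
}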
David Goldblatt 2017-03-31 19:59:45 -07:00 committed by David Goldblatt
parent 0a0fcd3e6a
commit b407a65401
10 changed files with 170 additions and 47 deletions

View File

@ -1,8 +1,8 @@
#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H
extern void (*hooks_arena_new_hook)();
extern void (*hooks_libc_hook)();
extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
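
For context, JEMALLOC_HOOK(fn, hook) first invokes hook if it is non-NULL and then evaluates to fn. A standalone illustration of the expansion follows; hooked_malloc and my_hook are hypothetical and not part of jemalloc.

#include <stdio.h>
#include <stdlib.h>

#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

static void (*hooks_libc_hook)(void);

static void
my_hook(void) {
	fputs("hook ran before the wrapped call\n", stderr);
}

static void *
hooked_malloc(size_t size) {
	/* Runs hooks_libc_hook (if set), then evaluates malloc(size). */
	return JEMALLOC_HOOK(malloc(size), hooks_libc_hook);
}

int
main(void) {
	hooks_libc_hook = my_hook;
	void *p = hooked_malloc(16);
	free(p);
	return 0;
}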

View File

@ -1013,6 +1013,11 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
return arena;
}
/* During reentrancy, arena 0 is the safest bet. */
if (*tsd_reentrancy_levelp_get(tsd) > 1) {
return arena_get(tsd_tsdn(tsd), 0, true);
}
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
@ -1193,7 +1198,9 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
if (!is_internal && *tsd_reentrancy_levelp_get(tsdn_tsd(tsdn)) != 0) {
tcache = NULL;
}
arena_dalloc(tsdn, ptr, tcache, slow_path);
}

View File

@ -232,6 +232,7 @@ hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
hooks_arena_new_hook
hooks_libc_hook
iaalloc
ialloc
@ -537,6 +538,9 @@ tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_reentrancy_level_get
tsd_reentrancy_level_set
tsd_reentrancy_levelp_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal

View File

@ -65,7 +65,8 @@ struct tsd_init_head_s {
O(witnesses, witness_list_t, no, no, yes) \
O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \
no, no, no) \
O(witness_fork, bool, yes, no, no)
O(witness_fork, bool, yes, no, no) \
O(reentrancy_level, int, no, no, no)
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@ -82,7 +83,8 @@ struct tsd_init_head_s {
TCACHE_ZERO_INITIALIZER, \
ql_head_initializer(witnesses), \
RTREE_ELM_WITNESS_TSD_INITIALIZER, \
false \
false, \
0 \
}
struct tsd_s {

View File

@ -1952,6 +1952,19 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena->base = base;
/* We don't support reentrancy for arena 0 bootstrapping. */
if (ind != 0 && hooks_arena_new_hook) {
/*
* If we're here, then arena 0 already exists, so bootstrapping
* is done enough that we should have tsd.
*/
int *reentrancy_level = tsd_reentrancy_levelp_get(tsdn_tsd(
tsdn));
++*reentrancy_level;
hooks_arena_new_hook();
--*reentrancy_level;
}
return arena;
label_error:
if (ind != 0) {
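
Because the reentrancy level is bumped around the hook call, code running from hooks_arena_new_hook can safely call back into malloc/free while the new arena is still under construction; that is what the test harness later in this commit exercises. A hedged sketch of installing such a hook (the installer function is hypothetical, and hooks_arena_new_hook is an internal symbol, not public API):

#include <stdlib.h>

/* Internal jemalloc hook declared in hooks.h (not public API). */
extern void (*hooks_arena_new_hook)();

static void
on_arena_new(void) {
	/*
	 * We run inside arena_new here. With the reentrancy level bumped,
	 * this allocation is routed to arena 0 with no tcache instead of
	 * the half-constructed arena.
	 */
	free(malloc(64));
}

/* Hypothetical installer: any later arena creation (ind != 0) runs the hook. */
void
install_arena_new_hook(void) {
	hooks_arena_new_hook = on_arena_new;
}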

View File

@ -1656,6 +1656,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
szind_t ind = 0;
size_t usize = 0;
/*
* For reentrancy checking, we get the old reentrancy level from tsd and
* reset it once we're done. In case of early bailout though, we never
* bother getting the old level, so we shouldn't try to reset it. This
* is indicated by leaving the pointer as NULL.
*/
int *reentrancy_level = NULL;
/* Initialize (if we can't prove we don't have to). */
if (sopts->slow) {
if (unlikely(malloc_init())) {
@ -1708,7 +1716,27 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
* some reason. Let's grab it right away.
*/
tsd = tsd_fetch();
/*
* If we need to handle reentrancy, we can do it out of a
* known-initialized arena (i.e. arena 0).
*/
reentrancy_level = tsd_reentrancy_levelp_get(tsd);
++*reentrancy_level;
if (*reentrancy_level == 1) {
witness_assert_lockless(tsd_tsdn(tsd));
}
if (unlikely(*reentrancy_level > 1)) {
/*
* We should never specify particular arenas or tcaches from
* within our internal allocations.
*/
assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC);
assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
dopts->tcache_ind = TCACHE_IND_NONE;
/* We know that arena 0 has already been initialized. */
dopts->arena_ind = 0;
}
/* If profiling is on, get our profiling context. */
if (config_prof && opt_prof) {
@ -1769,9 +1797,15 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
UTRACE(0, size, allocation);
}
witness_assert_lockless(tsd_tsdn(tsd));
/* Success! */
if (*reentrancy_level == 1) {
witness_assert_lockless(tsd_tsdn(tsd));
}
/*
* If we got here, we never bailed out on a failure path, so
* reentrancy_level is non-null.
*/
--*reentrancy_level;
*dopts->result = allocation;
return 0;
@ -1795,6 +1829,10 @@ label_oom:
*dopts->result = NULL;
}
if (reentrancy_level != NULL) {
--*reentrancy_level;
}
return ENOMEM;
/*
@ -1822,6 +1860,10 @@ label_invalid_alignment:
*dopts->result = NULL;
}
if (reentrancy_level != NULL) {
--*reentrancy_level;
}
return EINVAL;
}
@ -1996,7 +2038,9 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) {
JEMALLOC_ALWAYS_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
if (*tsd_reentrancy_levelp_get(tsd) == 0) {
witness_assert_lockless(tsd_tsdn(tsd));
}
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@ -2021,7 +2065,9 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
JEMALLOC_ALWAYS_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
if (*tsd_reentrancy_levelp_get(tsd) == 0) {
witness_assert_lockless(tsd_tsdn(tsd));
}
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@ -2056,7 +2102,11 @@ je_realloc(void *ptr, size_t size) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
ifree(tsd, ptr, tcache_get(tsd), true);
tcache_t *tcache = NULL;
if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
tcache = tcache_get(tsd);
}
ifree(tsd, ptr, tcache, true);
return NULL;
}
size = 1;
@ -2111,14 +2161,22 @@ je_free(void *ptr) {
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
if (*tsd_reentrancy_levelp_get(tsd) == 0) {
witness_assert_lockless(tsd_tsdn(tsd));
if (likely(!malloc_slow)) {
ifree(tsd, ptr, tcache_get(tsd), false);
} else {
ifree(tsd, ptr, tcache_get(tsd), true);
}
tcache_t *tcache = NULL;
if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
tcache = tcache_get(tsd);
}
if (likely(!malloc_slow)) {
ifree(tsd, ptr, tcache, false);
} else {
ifree(tsd, ptr, tcache, true);
}
if (*tsd_reentrancy_levelp_get(tsd) == 0) {
witness_assert_lockless(tsd_tsdn(tsd));
}
}
}
/*
@ -2599,13 +2657,19 @@ je_dallocx(void *ptr, int flags) {
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
/* Not allowed to be reentrant and specify a custom tcache. */
assert(*tsd_reentrancy_levelp_get(tsd) == 0);
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
} else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
tcache = tcache_get(tsd);
} else {
tcache = NULL;
}
}
UTRACE(ptr, 0, 0);
@ -2646,13 +2710,19 @@ je_sdallocx(void *ptr, size_t size, int flags) {
witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
/* Not allowed to be reentrant and specify a custom tcache. */
assert(*tsd_reentrancy_levelp_get(tsd) == 0);
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
tcache = NULL;
} else {
tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
}
} else {
if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
tcache = tcache_get(tsd);
} else {
tcache = NULL;
}
}
UTRACE(ptr, 0, 0);

View File

@ -10,31 +10,56 @@ static const char * test_name = "";
/* Reentrancy testing helpers. */
#define NUM_REENTRANT_ALLOCS 20
static bool reentrant = false;
static bool hook_ran = false;
static void *to_free[NUM_REENTRANT_ALLOCS];
typedef enum {
non_reentrant = 0,
libc_reentrant = 1,
arena_new_reentrant = 2
} reentrancy_t;
static reentrancy_t reentrancy;
static bool libc_hook_ran = false;
static bool arena_new_hook_ran = false;
static const char *
reentrancy_t_str(reentrancy_t r) {
switch (r) {
case non_reentrant:
return "non-reentrant";
case libc_reentrant:
return "libc-reentrant";
case arena_new_reentrant:
return "arena_new-reentrant";
default:
unreachable();
}
}
static void
reentrancy_hook() {
hook_ran = true;
hooks_libc_hook = NULL;
do_hook(bool *hook_ran, void (**hook)()) {
*hook_ran = true;
*hook = NULL;
void *to_free_local[NUM_REENTRANT_ALLOCS];
size_t alloc_size = 1;
for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
to_free[i] = malloc(alloc_size);
to_free_local[i] = malloc(alloc_size);
free(malloc(alloc_size));
alloc_size *= 2;
}
for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
free(to_free_local[i]);
}
}
static void
libc_reentrancy_hook() {
do_hook(&libc_hook_ran, &hooks_libc_hook);
}
static void
arena_new_reentrancy_hook() {
do_hook(&arena_new_hook_ran, &hooks_arena_new_hook);
}
/* Actual test infrastructure. */
bool
test_is_reentrant() {
return reentrant;
return reentrancy != non_reentrant;
}
JEMALLOC_FORMAT_PRINTF(1, 2)
@ -81,9 +106,8 @@ p_test_init(const char *name) {
void
p_test_fini(void) {
test_counts[test_status]++;
malloc_printf("%s: %s (%s)\n", test_name,
test_status_string(test_status),
reentrant ? "reentrant" : "non-reentrant");
malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy),
test_status_string(test_status));
}
static test_status_t
@ -106,24 +130,28 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
ret = test_status_pass;
for (; t != NULL; t = va_arg(ap, test_t *)) {
/* Non-reentrant run. */
reentrant = false;
reentrancy = non_reentrant;
hooks_arena_new_hook = hooks_libc_hook = NULL;
t();
if (test_status > ret) {
ret = test_status;
}
/* Reentrant run. */
if (do_reentrant) {
reentrant = true;
hooks_libc_hook = &reentrancy_hook;
reentrancy = libc_reentrant;
hooks_arena_new_hook = NULL;
hooks_libc_hook = &libc_reentrancy_hook;
t();
if (test_status > ret) {
ret = test_status;
}
if (hook_ran) {
hook_ran = false;
for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
free(to_free[i]);
}
reentrancy = arena_new_reentrant;
hooks_libc_hook = NULL;
hooks_arena_new_hook = &arena_new_reentrancy_hook;
t();
if (test_status > ret) {
ret = test_status;
}
}
}

View File

@ -156,7 +156,7 @@ TEST_END
int
main(void) {
return test(
return test_no_reentrancy(
test_malloc_vs_mallocx,
test_free_vs_dallocx,
test_dallocx_vs_sdallocx,

View File

@ -351,7 +351,7 @@ TEST_END
int
main(void) {
return test(
return test_no_reentrancy(
test_stats_summary,
test_stats_large,
test_stats_arenas_summary,

View File

@ -79,7 +79,6 @@ thd_start(void *arg) {
}
TEST_BEGIN(test_tsd_main_thread) {
test_skip_if(test_is_reentrant());
thd_start((void *)(uintptr_t)0xa5f3e329);
}
TEST_END
@ -144,7 +143,7 @@ main(void) {
data_tsd_boot();
data_test_started = true;
return test(
return test_no_reentrancy(
test_tsd_main_thread,
test_tsd_sub_thread,
test_tsd_reincarnation);