Add basic reentrancy-checking support, and allow arena_new to reenter.
This checks whether or not we're reentrant using thread-local data and, if we are, moves certain internal allocations to use arena 0 (which should be properly initialized after bootstrapping). The immediate thing this allows is spinning up threads in arena_new, which paves the way for creating background threads there.
commit b407a65401
parent 0a0fcd3e6a
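The core of the change is a per-thread reentrancy counter kept in tsd: allocator entry points bump it, hook invocations (such as the one in arena_new) run with it elevated, and any allocation that observes a level above 1 routes itself to arena 0 and skips the tcache. Below is a minimal standalone sketch of that guard pattern; the names (reentrancy_level, arena_new_hook, choose_arena_index, allocate) are simplified stand-ins for illustration, not jemalloc's actual internal API.

#include <assert.h>
#include <stddef.h>

/*
 * Hypothetical illustration of the guard pattern this commit adds (names
 * simplified; not jemalloc's real internals).  Each thread keeps a reentrancy
 * counter in thread-local data; allocator entry points bump it, and any
 * allocation that sees a level above 1 knows it was reached from inside the
 * allocator itself, so it falls back to arena 0, which is fully initialized
 * once bootstrapping is done.
 */
static __thread int reentrancy_level = 0;

/* Hook a test (or embedder) may install; it may call back into the allocator. */
static void (*arena_new_hook)(void) = NULL;

static unsigned
choose_arena_index(void) {
	/* During reentrancy, arena 0 is the safest bet. */
	if (reentrancy_level > 1) {
		return 0;
	}
	/* Normal path: the arena this thread is bound to (stubbed as 1). */
	return 1;
}

static unsigned
allocate(void) {
	/* Every allocator entry point bumps the level, as imalloc_body does. */
	++reentrancy_level;
	unsigned ind = choose_arena_index();
	--reentrancy_level;
	return ind;
}

static void
arena_new(unsigned ind) {
	/* ... arena setup would happen here ... */

	/* Reentrancy isn't supported while bootstrapping arena 0 itself. */
	if (ind != 0 && arena_new_hook != NULL) {
		++reentrancy_level;
		arena_new_hook();	/* May allocate; lands in arena 0. */
		--reentrancy_level;
	}
}

static void
example_hook(void) {
	/* Runs with the level already at 1; its allocations see level 2. */
	assert(allocate() == 0);
}

int
main(void) {
	assert(allocate() == 1);	/* Not reentrant: normal arena. */
	arena_new_hook = example_hook;
	arena_new(1);			/* Hook's allocation is routed to arena 0. */
	return 0;
}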
@@ -1,8 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_HOOKS_H
 #define JEMALLOC_INTERNAL_HOOKS_H
 
-extern void (*hooks_arena_new_hook)();
-extern void (*hooks_libc_hook)();
+extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
+extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
 
 #define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
 
@@ -1013,6 +1013,11 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
 		return arena;
 	}
 
+	/* During reentrancy, arena 0 is the safest bet. */
+	if (*tsd_reentrancy_levelp_get(tsd) > 1) {
+		return arena_get(tsd_tsdn(tsd), 0, true);
+	}
+
 	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
 	if (unlikely(ret == NULL)) {
 		ret = arena_choose_hard(tsd, internal);
@@ -1193,7 +1198,9 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
 	if (config_stats && is_internal) {
 		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
 	}
+	if (!is_internal && *tsd_reentrancy_levelp_get(tsdn_tsd(tsdn)) != 0) {
+		tcache = NULL;
+	}
 	arena_dalloc(tsdn, ptr, tcache, slow_path);
 }
 
@@ -232,6 +232,7 @@ hash_rotl_64
 hash_x64_128
 hash_x86_128
 hash_x86_32
+hooks_arena_new_hook
 hooks_libc_hook
 iaalloc
 ialloc
@@ -537,6 +538,9 @@ tsd_init_head
 tsd_narenas_tdata_get
 tsd_narenas_tdata_set
 tsd_narenas_tdatap_get
+tsd_reentrancy_level_get
+tsd_reentrancy_level_set
+tsd_reentrancy_levelp_get
 tsd_wrapper_get
 tsd_wrapper_set
 tsd_nominal
@@ -65,7 +65,8 @@ struct tsd_init_head_s {
 	O(witnesses, witness_list_t, no, no, yes) \
 	O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \
 	    no, no, no) \
-	O(witness_fork, bool, yes, no, no)
+	O(witness_fork, bool, yes, no, no) \
+	O(reentrancy_level, int, no, no, no)
 
 #define TSD_INITIALIZER { \
 	tsd_state_uninitialized, \
@@ -82,7 +83,8 @@ struct tsd_init_head_s {
 	TCACHE_ZERO_INITIALIZER, \
 	ql_head_initializer(witnesses), \
 	RTREE_ELM_WITNESS_TSD_INITIALIZER, \
-	false \
+	false, \
+	0 \
 }
 
 struct tsd_s {
src/arena.c
@@ -1952,6 +1952,19 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 
 	arena->base = base;
 
+	/* We don't support reentrancy for arena 0 bootstrapping. */
+	if (ind != 0 && hooks_arena_new_hook) {
+		/*
+		 * If we're here, then arena 0 already exists, so bootstrapping
+		 * is done enough that we should have tsd.
+		 */
+		int *reentrancy_level = tsd_reentrancy_levelp_get(tsdn_tsd(
+		    tsdn));
+		++*reentrancy_level;
+		hooks_arena_new_hook();
+		--*reentrancy_level;
+	}
+
 	return arena;
 label_error:
 	if (ind != 0) {
@@ -1656,6 +1656,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
 	szind_t ind = 0;
 	size_t usize = 0;
 
+	/*
+	 * For reentrancy checking, we get the old reentrancy level from tsd and
+	 * reset it once we're done. In case of early bailout though, we never
+	 * bother getting the old level, so we shouldn't try to reset it. This
+	 * is indicated by leaving the pointer as NULL.
+	 */
+	int *reentrancy_level = NULL;
+
 	/* Initialize (if we can't prove we don't have to). */
 	if (sopts->slow) {
 		if (unlikely(malloc_init())) {
@@ -1708,7 +1716,27 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
 	 * some reason. Let's grab it right away.
 	 */
 	tsd = tsd_fetch();
-	witness_assert_lockless(tsd_tsdn(tsd));
+
+	/*
+	 * If we need to handle reentrancy, we can do it out of a
+	 * known-initialized arena (i.e. arena 0).
+	 */
+	reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+	++*reentrancy_level;
+	if (*reentrancy_level == 1) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
+	if (unlikely(*reentrancy_level > 1)) {
+		/*
+		 * We should never specify particular arenas or tcaches from
+		 * within our internal allocations.
+		 */
+		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC);
+		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
+		dopts->tcache_ind = TCACHE_IND_NONE;
+		/* We know that arena 0 has already been initialized. */
+		dopts->arena_ind = 0;
+	}
 
 	/* If profiling is on, get our profiling context. */
 	if (config_prof && opt_prof) {
@@ -1769,9 +1797,15 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
 		UTRACE(0, size, allocation);
 	}
 
-	witness_assert_lockless(tsd_tsdn(tsd));
-
 	/* Success! */
+	if (*reentrancy_level == 1) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
+	/*
+	 * If we got here, we never bailed out on a failure path, so
+	 * reentrancy_level is non-null.
+	 */
+	--*reentrancy_level;
 	*dopts->result = allocation;
 	return 0;
 
@@ -1795,6 +1829,10 @@ label_oom:
 		*dopts->result = NULL;
 	}
 
+	if (reentrancy_level != NULL) {
+		--*reentrancy_level;
+	}
+
 	return ENOMEM;
 
 	/*
@@ -1822,6 +1860,10 @@ label_invalid_alignment:
 		*dopts->result = NULL;
 	}
 
+	if (reentrancy_level != NULL) {
+		--*reentrancy_level;
+	}
+
 	return EINVAL;
 }
 
@@ -1996,7 +2038,9 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) {
 
 JEMALLOC_ALWAYS_INLINE_C void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
-	witness_assert_lockless(tsd_tsdn(tsd));
+	if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
 
 	assert(ptr != NULL);
 	assert(malloc_initialized() || IS_INITIALIZER);
@@ -2021,7 +2065,9 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
 
 JEMALLOC_ALWAYS_INLINE_C void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
-	witness_assert_lockless(tsd_tsdn(tsd));
+	if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+		witness_assert_lockless(tsd_tsdn(tsd));
+	}
 
 	assert(ptr != NULL);
 	assert(malloc_initialized() || IS_INITIALIZER);
@@ -2056,7 +2102,11 @@ je_realloc(void *ptr, size_t size) {
 		/* realloc(ptr, 0) is equivalent to free(ptr). */
 		UTRACE(ptr, 0, 0);
 		tsd = tsd_fetch();
-		ifree(tsd, ptr, tcache_get(tsd), true);
+		tcache_t *tcache = NULL;
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		}
+		ifree(tsd, ptr, tcache, true);
 		return NULL;
 	}
 	size = 1;
@@ -2111,13 +2161,21 @@ je_free(void *ptr) {
 	UTRACE(ptr, 0, 0);
 	if (likely(ptr != NULL)) {
 		tsd_t *tsd = tsd_fetch();
-		witness_assert_lockless(tsd_tsdn(tsd));
-		if (likely(!malloc_slow)) {
-			ifree(tsd, ptr, tcache_get(tsd), false);
-		} else {
-			ifree(tsd, ptr, tcache_get(tsd), true);
+		if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+			witness_assert_lockless(tsd_tsdn(tsd));
+		}
+		tcache_t *tcache = NULL;
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		}
+		if (likely(!malloc_slow)) {
+			ifree(tsd, ptr, tcache, false);
+		} else {
+			ifree(tsd, ptr, tcache, true);
+		}
+		if (*tsd_reentrancy_levelp_get(tsd) == 0) {
+			witness_assert_lockless(tsd_tsdn(tsd));
 		}
-		witness_assert_lockless(tsd_tsdn(tsd));
 	}
 }
 
@@ -2599,13 +2657,19 @@ je_dallocx(void *ptr, int flags) {
 	tsd = tsd_fetch();
 	witness_assert_lockless(tsd_tsdn(tsd));
 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+		/* Not allowed to be reentrant and specify a custom tcache. */
+		assert(*tsd_reentrancy_levelp_get(tsd) == 0);
 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
 			tcache = NULL;
 		} else {
 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
 		}
 	} else {
-		tcache = tcache_get(tsd);
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		} else {
+			tcache = NULL;
+		}
 	}
 
 	UTRACE(ptr, 0, 0);
@@ -2646,13 +2710,19 @@ je_sdallocx(void *ptr, size_t size, int flags) {
 
 	witness_assert_lockless(tsd_tsdn(tsd));
 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+		/* Not allowed to be reentrant and specify a custom tcache. */
+		assert(*tsd_reentrancy_levelp_get(tsd) == 0);
 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
 			tcache = NULL;
 		} else {
 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
 		}
 	} else {
-		tcache = tcache_get(tsd);
+		if (likely(*tsd_reentrancy_levelp_get(tsd) == 0)) {
+			tcache = tcache_get(tsd);
+		} else {
+			tcache = NULL;
+		}
 	}
 
 	UTRACE(ptr, 0, 0);
@@ -10,31 +10,56 @@ static const char * test_name = "";
 /* Reentrancy testing helpers. */
 
 #define NUM_REENTRANT_ALLOCS 20
-static bool reentrant = false;
-static bool hook_ran = false;
-static void *to_free[NUM_REENTRANT_ALLOCS];
+typedef enum {
+	non_reentrant = 0,
+	libc_reentrant = 1,
+	arena_new_reentrant = 2
+} reentrancy_t;
+static reentrancy_t reentrancy;
+
+static bool libc_hook_ran = false;
+static bool arena_new_hook_ran = false;
+
+static const char *
+reentrancy_t_str(reentrancy_t r) {
+	switch (r) {
+	case non_reentrant:
+		return "non-reentrant";
+	case libc_reentrant:
+		return "libc-reentrant";
+	case arena_new_reentrant:
+		return "arena_new-reentrant";
+	default:
+		unreachable();
+	}
+}
 
 static void
-reentrancy_hook() {
-	hook_ran = true;
-	hooks_libc_hook = NULL;
+do_hook(bool *hook_ran, void (**hook)()) {
+	*hook_ran = true;
+	*hook = NULL;
 
-	void *to_free_local[NUM_REENTRANT_ALLOCS];
 	size_t alloc_size = 1;
 	for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
-		to_free[i] = malloc(alloc_size);
-		to_free_local[i] = malloc(alloc_size);
+		free(malloc(alloc_size));
 		alloc_size *= 2;
 	}
-	for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
-		free(to_free_local[i]);
-	}
+}
+
+static void
+libc_reentrancy_hook() {
+	do_hook(&libc_hook_ran, &hooks_libc_hook);
+}
+
+static void
+arena_new_reentrancy_hook() {
+	do_hook(&arena_new_hook_ran, &hooks_arena_new_hook);
 }
 
 /* Actual test infrastructure. */
 bool
 test_is_reentrant() {
-	return reentrant;
+	return reentrancy != non_reentrant;
 }
 
 JEMALLOC_FORMAT_PRINTF(1, 2)
@@ -81,9 +106,8 @@ p_test_init(const char *name) {
 void
 p_test_fini(void) {
 	test_counts[test_status]++;
-	malloc_printf("%s: %s (%s)\n", test_name,
-	    test_status_string(test_status),
-	    reentrant ? "reentrant" : "non-reentrant");
+	malloc_printf("%s (%s): %s\n", test_name, reentrancy_t_str(reentrancy),
+	    test_status_string(test_status));
 }
 
 static test_status_t
@@ -106,24 +130,28 @@ p_test_impl(bool do_malloc_init, bool do_reentrant, test_t *t, va_list ap) {
 	ret = test_status_pass;
 	for (; t != NULL; t = va_arg(ap, test_t *)) {
 		/* Non-reentrant run. */
-		reentrant = false;
+		reentrancy = non_reentrant;
+		hooks_arena_new_hook = hooks_libc_hook = NULL;
 		t();
 		if (test_status > ret) {
 			ret = test_status;
 		}
 		/* Reentrant run. */
 		if (do_reentrant) {
-			reentrant = true;
-			hooks_libc_hook = &reentrancy_hook;
+			reentrancy = libc_reentrant;
+			hooks_arena_new_hook = NULL;
+			hooks_libc_hook = &libc_reentrancy_hook;
 			t();
 			if (test_status > ret) {
 				ret = test_status;
 			}
-			if (hook_ran) {
-				hook_ran = false;
-				for (int i = 0; i < NUM_REENTRANT_ALLOCS; i++) {
-					free(to_free[i]);
-				}
+
+			reentrancy = arena_new_reentrant;
+			hooks_libc_hook = NULL;
+			hooks_arena_new_hook = &arena_new_reentrancy_hook;
+			t();
+			if (test_status > ret) {
+				ret = test_status;
 			}
 		}
 	}
@@ -156,7 +156,7 @@ TEST_END
 
 int
 main(void) {
-	return test(
+	return test_no_reentrancy(
 	    test_malloc_vs_mallocx,
 	    test_free_vs_dallocx,
 	    test_dallocx_vs_sdallocx,
@@ -351,7 +351,7 @@ TEST_END
 
 int
 main(void) {
-	return test(
+	return test_no_reentrancy(
 	    test_stats_summary,
 	    test_stats_large,
 	    test_stats_arenas_summary,
@@ -79,7 +79,6 @@ thd_start(void *arg) {
 }
 
 TEST_BEGIN(test_tsd_main_thread) {
-	test_skip_if(test_is_reentrant());
 	thd_start((void *)(uintptr_t)0xa5f3e329);
 }
 TEST_END
@@ -144,7 +143,7 @@ main(void) {
 	data_tsd_boot();
 	data_test_started = true;
 
-	return test(
+	return test_no_reentrancy(
 	    test_tsd_main_thread,
 	    test_tsd_sub_thread,
 	    test_tsd_reincarnation);