Add basic reentrancy-checking support, and allow arena_new to reenter.

This checks whether we're reentrant using thread-local data, and, if we are,
moves certain internal allocations to use arena 0 (which should be properly
initialized after bootstrapping).

The immediate benefit is that threads can now be spun up from within arena_new,
which will enable launching background threads there.
This commit is contained in:
David Goldblatt
2017-03-31 19:59:45 -07:00
committed by David Goldblatt
parent 0a0fcd3e6a
commit b407a65401
10 changed files with 170 additions and 47 deletions

View File

@@ -1,8 +1,8 @@
#ifndef JEMALLOC_INTERNAL_HOOKS_H
#define JEMALLOC_INTERNAL_HOOKS_H
extern void (*hooks_arena_new_hook)();
extern void (*hooks_libc_hook)();
extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*hooks_libc_hook)();
#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

View File

@@ -1013,6 +1013,11 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
return arena;
}
/* During reentrancy, arena 0 is the safest bet. */
if (*tsd_reentrancy_levelp_get(tsd) > 1) {
return arena_get(tsd_tsdn(tsd), 0, true);
}
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
if (unlikely(ret == NULL)) {
ret = arena_choose_hard(tsd, internal);
@@ -1193,7 +1198,9 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_internal,
if (config_stats && is_internal) {
arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
}
if (!is_internal && *tsd_reentrancy_levelp_get(tsdn_tsd(tsdn)) != 0) {
tcache = NULL;
}
arena_dalloc(tsdn, ptr, tcache, slow_path);
}

View File

@@ -232,6 +232,7 @@ hash_rotl_64
hash_x64_128
hash_x86_128
hash_x86_32
hooks_arena_new_hook
hooks_libc_hook
iaalloc
ialloc
@@ -537,6 +538,9 @@ tsd_init_head
tsd_narenas_tdata_get
tsd_narenas_tdata_set
tsd_narenas_tdatap_get
tsd_reentrancy_level_get
tsd_reentrancy_level_set
tsd_reentrancy_levelp_get
tsd_wrapper_get
tsd_wrapper_set
tsd_nominal

View File

@@ -65,7 +65,8 @@ struct tsd_init_head_s {
O(witnesses, witness_list_t, no, no, yes) \
O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \
no, no, no) \
O(witness_fork, bool, yes, no, no)
O(witness_fork, bool, yes, no, no) \
O(reentrancy_level, int, no, no, no)
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@@ -82,7 +83,8 @@ struct tsd_init_head_s {
TCACHE_ZERO_INITIALIZER, \
ql_head_initializer(witnesses), \
RTREE_ELM_WITNESS_TSD_INITIALIZER, \
false \
false, \
0 \
}
struct tsd_s {