Hooks: Protect against reentrancy.

Previously, we made users deal with this themselves, but that's not good
enough: if hooks may allocate, we should test the allocation pathways through
hooks.  And if we're doing that, we might as well implement the protection for
the user.
David Goldblatt 2018-05-15 14:15:43 -07:00 committed by David Goldblatt
parent 0379235f47
commit a7f749c9af
4 changed files with 106 additions and 12 deletions

include/jemalloc/internal/hook.h

@@ -25,9 +25,9 @@
  * and only calls the alloc hook).
  *
  * Reentrancy:
- *   Is not protected against.  If your hooks allocate, then the hooks will be
- *   called again.  Note that you can guard against this with a thread-local
- *   "in_hook" bool.
+ *   Reentrancy is guarded against from within the hook implementation.  If
+ *   you call allocator functions from within a hook, the hooks will not be
+ *   invoked again.
  * Threading:
  *   The installation of a hook synchronizes with all its uses.  If you can
  *   prove the installation of a hook happens-before a jemalloc entry point,
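Concretely, a user hook can now call back into the allocator without rolling
its own guard.  Below is a minimal sketch of such a hook, written as if inside
jemalloc's test harness (the include, the hook's name, and the logging logic
are illustrative assumptions; only the hook signature comes from the test
hooks in test/unit/hook.c further down):

#include "test/jemalloc_test.h"

/*
 * A sketch of a user alloc hook that itself allocates.  Under the old
 * contract, the malloc below would have re-triggered the hooks; with this
 * commit, the thread-local in_hook flag makes the nested call skip them.
 */
static void
logging_alloc_hook(void *extra, hook_alloc_t type, void *result,
    uintptr_t result_raw, uintptr_t args_raw[3]) {
    /* Re-enters jemalloc; hooks stay silent for this nested call. */
    char *record = malloc(64);
    if (record != NULL) {
        /* ... format a log record describing the allocation ... */
        free(record);
    }
}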

include/jemalloc/internal/tsd.h

@@ -66,6 +66,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
 #define MALLOC_TSD \
     O(tcache_enabled, bool, bool) \
     O(arenas_tdata_bypass, bool, bool) \
+    O(in_hook, bool, bool) \
     O(reentrancy_level, int8_t, int8_t) \
     O(narenas_tdata, uint32_t, uint32_t) \
     O(offset_state, uint64_t, uint64_t) \
@@ -85,6 +86,7 @@ typedef ql_elm(tsd_t) tsd_link_t;
     ATOMIC_INIT(tsd_state_uninitialized), \
     TCACHE_ENABLED_ZERO_INITIALIZER, \
     false, \
+    false, \
     0, \
     0, \
     0, \
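For context, MALLOC_TSD is an X-macro list: each O(name, ...) entry gets
expanded once per use site, but the zero-initializer is a plain literal, so
the new in_hook entry needs a positionally matching false in the initializer,
which is exactly what the second hunk adds.  A standalone sketch of the
pattern, with hypothetical names rather than jemalloc's:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical X-macro list, mirroring how MALLOC_TSD drives expansion. */
#define MY_TSD \
    O(tcache_enabled, bool) \
    O(in_hook, bool) \
    O(reentrancy_level, int)

/* Expand once to declare the struct fields. */
typedef struct {
#define O(name, type) type name;
MY_TSD
#undef O
} my_tsd_t;

/*
 * The initializer is not macro-generated, so it must be kept in positional
 * sync by hand -- the discipline the diff above follows by inserting the
 * extra "false" at the in_hook slot.
 */
#define MY_TSD_INITIALIZER { false, false, 0 }

int
main(void) {
    my_tsd_t tsd = MY_TSD_INITIALIZER;
    printf("in_hook starts as %d\n", (int)tsd.in_hook);
    return 0;
}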

src/hook.c

@@ -99,12 +99,62 @@ for (int for_each_hook_counter = 0; \
 #define FOR_EACH_HOOK_END \
 }
 
+static bool *
+hook_reentrantp() {
+    /*
+     * We prevent user reentrancy within hooks.  This is basically just a
+     * thread-local bool that triggers an early-exit.
+     *
+     * We don't fold in_hook into reentrancy.  There are two reasons for
+     * this:
+     * - Right now, we turn on reentrancy during things like extent hook
+     *   execution.  Allocating during extent hooks is not officially
+     *   supported, but we don't want to break it for the time being.  These
+     *   sorts of allocations should probably still be hooked, though.
+     * - If a hook allocates, we may want it to be relatively fast (after
+     *   all, it executes on every allocator operation).  Turning on
+     *   reentrancy is a fairly heavyweight mode (disabling tcache,
+     *   redirecting to arena 0, etc.).  It's possible we may one day want
+     *   to turn on reentrant mode here, if it proves too difficult to keep
+     *   this working.  But that's fairly easy for us to see; OTOH, people
+     *   not using hooks because they're too slow is easy for us to miss.
+     *
+     * The tricky part is that this code might get invoked even if we don't
+     * have access to tsd.  This function mimics getting a pointer to
+     * thread-local data, except that it might secretly return a pointer to
+     * some global data if we know that the caller will take the early-exit
+     * path.  If we return a bool that indicates that we are reentrant, then
+     * the caller will go down the early exit path, leaving the global
+     * untouched.
+     */
+    static bool in_hook_global = true;
+    tsdn_t *tsdn = tsdn_fetch();
+    bool *in_hook = tsdn_in_hookp_get(tsdn);
+    if (in_hook != NULL) {
+        return in_hook;
+    }
+    return &in_hook_global;
+}
+
+#define HOOK_PROLOGUE \
+    if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) { \
+        return; \
+    } \
+    bool *in_hook = hook_reentrantp(); \
+    if (*in_hook) { \
+        return; \
+    } \
+    *in_hook = true;
+
+#define HOOK_EPILOGUE \
+    *in_hook = false;
+
 void
 hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
     uintptr_t args_raw[3]) {
-    if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
-        return;
-    }
+    HOOK_PROLOGUE
     hooks_internal_t hook;
     FOR_EACH_HOOK_BEGIN(&hook)
         hook_alloc h = hook.hooks.alloc_hook;
@@ -112,13 +162,13 @@ hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
             h(hook.hooks.extra, type, result, result_raw, args_raw);
         }
     FOR_EACH_HOOK_END
+    HOOK_EPILOGUE
 }
 
 void
 hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
-    if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
-        return;
-    }
+    HOOK_PROLOGUE
     hooks_internal_t hook;
     FOR_EACH_HOOK_BEGIN(&hook)
         hook_dalloc h = hook.hooks.dalloc_hook;
@@ -126,14 +176,13 @@ hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
             h(hook.hooks.extra, type, address, args_raw);
         }
     FOR_EACH_HOOK_END
+    HOOK_EPILOGUE
 }
 
 void
 hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
     size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
-    if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
-        return;
-    }
+    HOOK_PROLOGUE
     hooks_internal_t hook;
     FOR_EACH_HOOK_BEGIN(&hook)
         hook_expand h = hook.hooks.expand_hook;
@@ -142,4 +191,5 @@ hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
             result_raw, args_raw);
         }
     FOR_EACH_HOOK_END
+    HOOK_EPILOGUE
 }
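Hand-expanding the macros makes the control flow easier to audit.  After
preprocessing, hook_invoke_alloc takes roughly the following shape (this is
just the two macros above spliced back in, with comments added; it is not
additional patch content):

void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
    /* Fast path: no hooks installed anywhere. */
    if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {
        return;
    }
    /* Early-exit if this thread is already executing a hook. */
    bool *in_hook = hook_reentrantp();
    if (*in_hook) {
        return;
    }
    *in_hook = true;

    hooks_internal_t hook;
    FOR_EACH_HOOK_BEGIN(&hook)
        hook_alloc h = hook.hooks.alloc_hook;
        if (h != NULL) {
            h(hook.hooks.extra, type, result, result_raw, args_raw);
        }
    FOR_EACH_HOOK_END

    /* Reopen hook invocation for this thread. */
    *in_hook = false;
}

Two properties fall out of this structure: the nhooks fast path runs before
any tsd access, keeping the common no-hooks case cheap, and since the global
fallback returned by hook_reentrantp is statically true, a thread without tsd
always takes the early exit and never writes the shared global.  The guard is
per-thread state, so it suppresses only same-thread reentrancy; concurrent
threads can each be inside a hook simultaneously.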

test/unit/hook.c

@@ -25,6 +25,45 @@ reset_args() {
     memset(arg_args_raw, 77, sizeof(arg_args_raw));
 }
 
+static void
+alloc_free_size(size_t sz) {
+    void *ptr = mallocx(sz, 0);
+    free(ptr);
+    ptr = mallocx(sz, 0);
+    free(ptr);
+    ptr = mallocx(sz, MALLOCX_TCACHE_NONE);
+    dallocx(ptr, MALLOCX_TCACHE_NONE);
+}
+
+/*
+ * We want to support a degree of user reentrancy.  This tests a variety of
+ * allocation scenarios.
+ */
+static void
+be_reentrant() {
+    /* Let's make sure the tcache is non-empty if enabled. */
+    alloc_free_size(1);
+    alloc_free_size(1024);
+    alloc_free_size(64 * 1024);
+    alloc_free_size(256 * 1024);
+    alloc_free_size(1024 * 1024);
+
+    /* Some reallocation. */
+    void *ptr = mallocx(129, 0);
+    ptr = rallocx(ptr, 130, 0);
+    free(ptr);
+
+    ptr = mallocx(2 * 1024 * 1024, 0);
+    free(ptr);
+    ptr = mallocx(1 * 1024 * 1024, 0);
+    ptr = rallocx(ptr, 2 * 1024 * 1024, 0);
+    free(ptr);
+
+    ptr = mallocx(1, 0);
+    ptr = rallocx(ptr, 1000, 0);
+    free(ptr);
+}
+
 static void
 set_args_raw(uintptr_t *args_raw, int nargs) {
     memcpy(arg_args_raw, args_raw, sizeof(uintptr_t) * nargs);
@@ -52,6 +91,7 @@ test_alloc_hook(void *extra, hook_alloc_t type, void *result,
     arg_result = result;
     arg_result_raw = result_raw;
     set_args_raw(args_raw, 3);
+    be_reentrant();
 }
 
 static void
@@ -62,6 +102,7 @@ test_dalloc_hook(void *extra, hook_dalloc_t type, void *address,
     arg_type = (int)type;
     arg_address = address;
     set_args_raw(args_raw, 3);
+    be_reentrant();
 }
 
 static void
@@ -76,6 +117,7 @@ test_expand_hook(void *extra, hook_expand_t type, void *address,
     arg_new_usize = new_usize;
     arg_result_raw = result_raw;
     set_args_raw(args_raw, 4);
+    be_reentrant();
 }
 
 TEST_BEGIN(test_hooks_basic) {
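With be_reentrant() wired into all three hooks, every hook invocation in the
existing tests now allocates, reallocates, and frees from inside a hook,
across both tcache-able and large size classes; without the new guard, each
of those calls would have recursed back into the hooks.  For completeness, a
sketch of how such hooks get installed and triggered (hooks_t's field order
and the exact hook_install/hook_remove signatures are assumptions here, since
this diff shows only the hook bodies):

TEST_BEGIN(test_hooks_reentrancy_sketch) {
    /* Assumed field order: alloc, dalloc, expand hooks, then extra. */
    hooks_t hooks = {&test_alloc_hook, &test_dalloc_hook,
        &test_expand_hook, NULL};
    void *handle = hook_install(TSDN_NULL, &hooks);
    assert_ptr_not_null(handle, "Hook installation failed");

    /* Each of these fires a hook, which then runs be_reentrant(). */
    void *ptr = mallocx(1, 0);
    ptr = rallocx(ptr, 1024, 0);
    free(ptr);

    hook_remove(TSDN_NULL, handle);
}
TEST_END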