#include "jemalloc/internal/jemalloc_preamble.h"

#include "jemalloc/internal/hook.h"

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/seq.h"

typedef struct hooks_internal_s hooks_internal_t;
struct hooks_internal_s {
	hooks_t hooks;
	bool in_use;
};

seq_define(hooks_internal_t, hooks)
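
/*
 * The seq_define() above emits the seq_hooks_t container type along with
 * seq_store_hooks() and seq_try_load_hooks(): writers publish a whole
 * hooks_internal_t under a sequence number, and readers get back false from
 * the try-load if they raced with a writer.  This is what lets hook
 * invocation proceed without taking hooks_mu.
 */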
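
/*
 * Number of currently-installed hooks.  When this is zero, the invoke paths
 * can return without examining the hook table at all.
 */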
static atomic_u_t nhooks = ATOMIC_INIT(0);
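/*
 * The hook table itself; each slot is seq-protected so that it can be read
 * without holding hooks_mu.
 */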
static seq_hooks_t hooks[HOOK_MAX];
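/* Guards mutation of the table (installation and removal). */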
static malloc_mutex_t hooks_mu;

bool
hook_boot() {
	return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
	    malloc_mutex_rank_exclusive);
}
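
/*
 * Claim the first free slot in the table.  Returns a pointer to the slot
 * (which doubles as the opaque handle handed back for hook_remove()), or
 * NULL if all HOOK_MAX slots are already in use.
 */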
static void *
hook_install_locked(hooks_t *to_install) {
	hooks_internal_t hooks_internal;
	for (int i = 0; i < HOOK_MAX; i++) {
		bool success = seq_try_load_hooks(&hooks_internal, &hooks[i]);
		/* We hold mu; no concurrent access. */
		assert(success);
		if (!hooks_internal.in_use) {
			hooks_internal.hooks = *to_install;
			hooks_internal.in_use = true;
			seq_store_hooks(&hooks[i], &hooks_internal);
			atomic_store_u(&nhooks,
			    atomic_load_u(&nhooks, ATOMIC_RELAXED) + 1,
			    ATOMIC_RELAXED);
			return &hooks[i];
		}
	}
	return NULL;
}
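
/*
 * On success, we also bump the global slow-path count, so that threads drop
 * out of their fast paths and start checking for installed hooks.
 */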
void *
hook_install(tsdn_t *tsdn, hooks_t *to_install) {
	malloc_mutex_lock(tsdn, &hooks_mu);
	void *ret = hook_install_locked(to_install);
	if (ret != NULL) {
		tsd_global_slow_inc(tsdn);
	}
	malloc_mutex_unlock(tsdn, &hooks_mu);
	return ret;
}
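
/*
 * Illustrative sketch only (not part of this file): installing a counting
 * alloc hook and later removing it.  Assumes the hooks_t field names used
 * elsewhere in this file (alloc_hook, extra) and a tsdn from tsdn_fetch().
 *
 *	static size_t count;
 *	static void
 *	count_alloc(void *extra, hook_alloc_t type, void *result,
 *	    uintptr_t result_raw, uintptr_t args_raw[3]) {
 *		(*(size_t *)extra)++;
 *	}
 *
 *	hooks_t my_hooks = {.alloc_hook = count_alloc, .extra = &count};
 *	void *handle = hook_install(tsdn_fetch(), &my_hooks);
 *	if (handle != NULL) {
 *		... allocator activity is now counted ...
 *		hook_remove(tsdn_fetch(), handle);
 *	}
 */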

static void
hook_remove_locked(seq_hooks_t *to_remove) {
	hooks_internal_t hooks_internal;
	bool success = seq_try_load_hooks(&hooks_internal, to_remove);
	/* We hold mu; no concurrent access. */
	assert(success);
	/* Should only remove hooks that were added. */
	assert(hooks_internal.in_use);
	hooks_internal.in_use = false;
	seq_store_hooks(to_remove, &hooks_internal);
	atomic_store_u(&nhooks, atomic_load_u(&nhooks, ATOMIC_RELAXED) - 1,
	    ATOMIC_RELAXED);
}
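
/*
 * opaque must be a handle previously returned by hook_install(); in debug
 * builds we assert that it actually points at a slot boundary inside the
 * table.
 */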
void
hook_remove(tsdn_t *tsdn, void *opaque) {
	if (config_debug) {
		char *hooks_begin = (char *)&hooks[0];
		char *hooks_end = (char *)&hooks[HOOK_MAX];
		char *hook = (char *)opaque;
		assert(hooks_begin <= hook && hook < hooks_end
		    && (hook - hooks_begin) % sizeof(seq_hooks_t) == 0);
	}
	malloc_mutex_lock(tsdn, &hooks_mu);
	hook_remove_locked((seq_hooks_t *)opaque);
	tsd_global_slow_dec(tsdn);
	malloc_mutex_unlock(tsdn, &hooks_mu);
}
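
/*
 * Iterate over every in-use slot, skipping slots whose seq-protected load
 * failed (i.e. raced with a concurrent install or remove).  The deliberately
 * ugly variable names reduce the chance of colliding with identifiers in the
 * enclosing function.
 */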
#define FOR_EACH_HOOK_BEGIN(hooks_internal_ptr)				\
for (int for_each_hook_counter = 0;					\
    for_each_hook_counter < HOOK_MAX;					\
    for_each_hook_counter++) {						\
	bool for_each_hook_success = seq_try_load_hooks(		\
	    (hooks_internal_ptr), &hooks[for_each_hook_counter]);	\
	if (!for_each_hook_success) {					\
		continue;						\
	}								\
	if (!(hooks_internal_ptr)->in_use) {				\
		continue;						\
	}

#define FOR_EACH_HOOK_END						\
}

static bool *
hook_reentrantp() {
	/*
	 * We prevent user reentrancy within hooks.  This is basically just a
	 * thread-local bool that triggers an early-exit.
	 *
	 * We don't fold in_hook into reentrancy.  There are two reasons for
	 * this:
	 * - Right now, we turn on reentrancy during things like extent hook
	 *   execution.  Allocating during extent hooks is not officially
	 *   supported, but we don't want to break it for the time being.
	 *   These sorts of allocations should probably still be hooked,
	 *   though.
	 * - If a hook allocates, we may want it to be relatively fast (after
	 *   all, it executes on every allocator operation).  Turning on
	 *   reentrancy is a fairly heavyweight mode (disabling tcache,
	 *   redirecting to arena 0, etc.).  It's possible we may one day want
	 *   to turn on reentrant mode here, if it proves too difficult to
	 *   keep this working.  But that's fairly easy for us to see; OTOH,
	 *   people not using hooks because they're too slow is easy for us
	 *   to miss.
	 *
	 * The tricky part is that this code might get invoked even if we
	 * don't have access to tsd.  This function mimics getting a pointer
	 * to thread-local data, except that it might secretly return a
	 * pointer to some global data if we know that the caller will take
	 * the early-exit path.  If we return a bool that indicates that we
	 * are reentrant, then the caller will go down the early exit path,
	 * leaving the global untouched.
	 */
	static bool in_hook_global = true;
	tsdn_t *tsdn = tsdn_fetch();
	bool *in_hook = tsdn_in_hookp_get(tsdn);
	if (in_hook != NULL) {
		return in_hook;
	}
	return &in_hook_global;
}
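
/*
 * These wrap each hook_invoke_* function below: first the cheap "no hooks
 * installed" fast path, then the reentrancy guard.  Both macros assume a
 * void-returning function, and HOOK_EPILOGUE relies on the in_hook local
 * that HOOK_PROLOGUE declares.
 */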
#define HOOK_PROLOGUE							\
	if (likely(atomic_load_u(&nhooks, ATOMIC_RELAXED) == 0)) {	\
		return;							\
	}								\
	bool *in_hook = hook_reentrantp();				\
	if (*in_hook) {							\
		return;							\
	}								\
	*in_hook = true;

#define HOOK_EPILOGUE							\
	*in_hook = false;
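
/*
 * Each hook_invoke_* function receives the logical result of the intercepted
 * entry point along with its raw (uintptr_t-cast) result and arguments, and
 * forwards them to every installed hook; see hook.h for the packing
 * conventions of result_raw and args_raw.
 */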
void
hook_invoke_alloc(hook_alloc_t type, void *result, uintptr_t result_raw,
    uintptr_t args_raw[3]) {
	HOOK_PROLOGUE

	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_alloc h = hook.hooks.alloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, result, result_raw,
			    args_raw);
		}
	FOR_EACH_HOOK_END

	HOOK_EPILOGUE
}

void
hook_invoke_dalloc(hook_dalloc_t type, void *address, uintptr_t args_raw[3]) {
	HOOK_PROLOGUE
	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_dalloc h = hook.hooks.dalloc_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, args_raw);
		}
	FOR_EACH_HOOK_END
	HOOK_EPILOGUE
}

void
hook_invoke_expand(hook_expand_t type, void *address, size_t old_usize,
    size_t new_usize, uintptr_t result_raw, uintptr_t args_raw[4]) {
	HOOK_PROLOGUE
	hooks_internal_t hook;
	FOR_EACH_HOOK_BEGIN(&hook)
		hook_expand h = hook.hooks.expand_hook;
		if (h != NULL) {
			h(hook.hooks.extra, type, address, old_usize,
			    new_usize, result_raw, args_raw);
		}
	FOR_EACH_HOOK_END
	HOOK_EPILOGUE
}