Fix tsd bootstrapping for a0malloc().
parent 919e4a0ea9
commit 0c12dcabc5
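In short: the a0 bootstrap path now bootstraps thread-specific data (tsd) itself. malloc_init_hard_a0_locked() takes a tsd_t ** out-parameter and calls malloc_tsd_boot0() once arena 0 exists (per the new comment, some code paths cause chunk allocation, which in turn depends on tsd); malloc_init_hard_a0() therefore no longer needs a tsd argument, and malloc_init_hard_recursible() now receives an already-bootstrapped tsd_t *. On the test side, a test_no_malloc_init() entry point is added to the harness, the new test/unit/a0.c exercises a0malloc()/a0dalloc() through it, and jemalloc_constructor() is compiled out under JEMALLOC_JET so such tests really do start uninitialized. File headers were lost in this rendering; the hunks appear to come, in order, from Makefile.in, src/jemalloc.c, the test harness (likely test/include/test/test.h and test/src/test.c), the new test/unit/a0.c, and two existing unit tests (likely test/unit/junk.c and test/unit/tsd.c).

A minimal, self-contained sketch of the out-parameter bootstrapping pattern the diff adopts follows; ctx_t, ctx_boot(), boot_locked(), and boot_recursible() are hypothetical stand-ins for illustration only, not jemalloc APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for jemalloc's tsd_t and malloc_tsd_boot0(). */
typedef struct {
	int id;
} ctx_t;

static ctx_t global_ctx;

static ctx_t *
ctx_boot(void)
{
	global_ctx.id = 42;
	return (&global_ctx);
}

/*
 * The locked bootstrap step creates the context it needs and hands it back
 * through an out-parameter, so a caller that arrives without a context
 * (as the a0 path does) still ends up holding one afterwards.
 */
static bool
boot_locked(ctx_t **ctx)
{
	*ctx = ctx_boot();
	return (*ctx == NULL);
}

/* Later bootstrap phases receive an already-initialized context. */
static bool
boot_recursible(ctx_t *ctx)
{
	return (ctx == NULL);
}

int
main(void)
{
	ctx_t *ctx = NULL;

	if (boot_locked(&ctx) || boot_recursible(ctx)) {
		fprintf(stderr, "bootstrap failed\n");
		return (1);
	}
	printf("bootstrapped ctx with id %d\n", ctx->id);
	return (0);
}

The point is only the calling convention: the locked phase fills in the context through a double pointer, while later phases take it by plain pointer, mirroring malloc_init_hard_a0_locked(tsd_t **tsd) versus malloc_init_hard_recursible(tsd_t *tsd) below.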
@@ -135,6 +135,7 @@ C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
 	$(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
 C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/util.c
 TESTS_UNIT := \
+	$(srcroot)test/unit/a0.c \
 	$(srcroot)test/unit/arena_reset.c \
 	$(srcroot)test/unit/atomic.c \
 	$(srcroot)test/unit/bitmap.c \
@@ -254,7 +254,7 @@ typedef struct {
  * definition.
  */

-static bool malloc_init_hard_a0(tsd_t *tsd);
+static bool malloc_init_hard_a0(void);
 static bool malloc_init_hard(void);

 /******************************************************************************/
@@ -291,7 +291,7 @@ malloc_init_a0(void)
 {

 	if (unlikely(malloc_init_state == malloc_init_uninitialized))
-		return (malloc_init_hard_a0(NULL));
+		return (malloc_init_hard_a0());
 	return (false);
 }

@@ -307,7 +307,7 @@ malloc_init(void)
 }

 /*
- * The a0*() functions are used instead of i[mcd]alloc() in situations that
+ * The a0*() functions are used instead of i{d,}alloc() in situations that
  * cannot tolerate TLS variable access.
  */

@@ -318,8 +318,8 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
 	if (unlikely(malloc_init_a0()))
 		return (NULL);

-	return (iallocztm(NULL, size, size2index(size), zero, NULL,
-	    is_metadata, arena_get(NULL, 0, true), true));
+	return (iallocztm(NULL, size, size2index(size), zero, NULL, is_metadata,
+	    arena_get(NULL, 0, true), true));
 }

 static void
@@ -1256,7 +1256,7 @@ malloc_init_hard_needed(void)
 }

 static bool
-malloc_init_hard_a0_locked(tsd_t *tsd)
+malloc_init_hard_a0_locked(tsd_t **tsd)
 {

 	malloc_initializer = INITIALIZER;
@@ -1283,7 +1283,7 @@ malloc_init_hard_a0_locked(tsd_t *tsd)
 		prof_boot1();
 	if (arena_boot())
 		return (true);
-	if (config_tcache && tcache_boot(tsd))
+	if (config_tcache && tcache_boot(*tsd))
 		return (true);
 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
 		return (true);
@@ -1299,38 +1299,41 @@ malloc_init_hard_a0_locked(tsd_t *tsd)
 	 * Initialize one arena here. The rest are lazily created in
 	 * arena_choose_hard().
 	 */
-	if (arena_init(tsd, 0) == NULL)
+	if (arena_init(*tsd, 0) == NULL)
 		return (true);

+	/*
+	 * Initialize tsd, since some code paths cause chunk allocation, which
+	 * in turn depends on tsd.
+	 */
+	*tsd = malloc_tsd_boot0();
+	if (*tsd == NULL)
+		return (true);
+
 	malloc_init_state = malloc_init_a0_initialized;

 	return (false);
 }

 static bool
-malloc_init_hard_a0(tsd_t *tsd)
+malloc_init_hard_a0(void)
 {
 	bool ret;
+	tsd_t *tsd = NULL;

 	malloc_mutex_lock(tsd, &init_lock);
-	ret = malloc_init_hard_a0_locked(tsd);
+	ret = malloc_init_hard_a0_locked(&tsd);
 	malloc_mutex_unlock(tsd, &init_lock);
 	return (ret);
 }

 /* Initialize data structures which may trigger recursive allocation. */
 static bool
-malloc_init_hard_recursible(tsd_t **tsd)
+malloc_init_hard_recursible(tsd_t *tsd)
 {
-	bool ret;

 	malloc_init_state = malloc_init_recursible;
-	malloc_mutex_unlock(*tsd, &init_lock);
-
-	/* LinuxThreads' pthread_setspecific() allocates. */
-	*tsd = malloc_tsd_boot0();
-	if (*tsd == NULL) {
-		ret = true;
-		goto label_return;
-	}
+	malloc_mutex_unlock(tsd, &init_lock);

 	ncpus = malloc_ncpus();

@@ -1339,17 +1342,16 @@ malloc_init_hard_recursible(tsd_t **tsd)
 	/* LinuxThreads' pthread_atfork() allocates. */
 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 	    jemalloc_postfork_child) != 0) {
-		ret = true;
 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
 		if (opt_abort)
 			abort();
+		malloc_mutex_lock(tsd, &init_lock);
+		return (true);
 	}
 #endif

-	ret = false;
-label_return:
-	malloc_mutex_lock(*tsd, &init_lock);
-	return (ret);
+	malloc_mutex_lock(tsd, &init_lock);
+	return (false);
 }

 static bool
@@ -1409,12 +1411,12 @@ malloc_init_hard(void)
 	}

 	if (malloc_init_state != malloc_init_a0_initialized &&
-	    malloc_init_hard_a0_locked(tsd)) {
+	    malloc_init_hard_a0_locked(&tsd)) {
 		malloc_mutex_unlock(tsd, &init_lock);
 		return (true);
 	}

-	if (malloc_init_hard_recursible(&tsd)) {
+	if (malloc_init_hard_recursible(tsd)) {
 		malloc_mutex_unlock(tsd, &init_lock);
 		return (true);
 	}
@@ -2669,6 +2671,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
  * to trigger the deadlock described above, but doing so would involve forking
  * via a library constructor that runs before jemalloc's runs.
  */
+#ifndef JEMALLOC_JET
 JEMALLOC_ATTR(constructor)
 static void
 jemalloc_constructor(void)
@@ -2676,6 +2679,7 @@ jemalloc_constructor(void)

 	malloc_init();
 }
+#endif

 #ifndef JEMALLOC_MUTEX_INIT_CB
 void
@@ -311,6 +311,9 @@ label_test_end: \
 #define test(...) \
 	p_test(__VA_ARGS__, NULL)

+#define test_no_malloc_init(...) \
+	p_test_no_malloc_init(__VA_ARGS__, NULL)
+
 #define test_skip_if(e) do { \
 	if (e) { \
 		test_skip("%s:%s:%d: Test skipped: (%s)", \
@@ -324,6 +327,7 @@ void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

 /* For private use by macros. */
 test_status_t p_test(test_t *t, ...);
+test_status_t p_test_no_malloc_init(test_t *t, ...);
 void p_test_init(const char *name);
 void p_test_fini(void);
 void p_test_fail(const char *prefix, const char *message);
@@ -60,32 +60,30 @@ p_test_fini(void)
 	malloc_printf("%s: %s\n", test_name, test_status_string(test_status));
 }

-test_status_t
-p_test(test_t *t, ...)
+static test_status_t
+p_test_impl(bool do_malloc_init, test_t *t, va_list ap)
 {
 	test_status_t ret;
-	va_list ap;

+	if (do_malloc_init) {
 		/*
-	 * Make sure initialization occurs prior to running tests. Tests are
-	 * special because they may use internal facilities prior to triggering
-	 * initialization as a side effect of calling into the public API. This
-	 * is a final safety that works even if jemalloc_constructor() doesn't
-	 * run, as for MSVC builds.
+		 * Make sure initialization occurs prior to running tests.
+		 * Tests are special because they may use internal facilities
+		 * prior to triggering initialization as a side effect of
+		 * calling into the public API.
 		 */
 		if (nallocx(1, 0) == 0) {
 			malloc_printf("Initialization error");
 			return (test_status_fail);
 		}
+	}

 	ret = test_status_pass;
-	va_start(ap, t);
 	for (; t != NULL; t = va_arg(ap, test_t *)) {
 		t();
 		if (test_status > ret)
 			ret = test_status;
 	}
-	va_end(ap);

 	malloc_printf("--- %s: %u/%u, %s: %u/%u, %s: %u/%u ---\n",
 	    test_status_string(test_status_pass),
@@ -98,6 +96,34 @@ p_test(test_t *t, ...)
 	return (ret);
 }

+test_status_t
+p_test(test_t *t, ...)
+{
+	test_status_t ret;
+	va_list ap;
+
+	ret = test_status_pass;
+	va_start(ap, t);
+	ret = p_test_impl(true, t, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
+test_status_t
+p_test_no_malloc_init(test_t *t, ...)
+{
+	test_status_t ret;
+	va_list ap;
+
+	ret = test_status_pass;
+	va_start(ap, t);
+	ret = p_test_impl(false, t, ap);
+	va_end(ap);
+
+	return (ret);
+}
+
 void
 p_test_fail(const char *prefix, const char *message)
 {
test/unit/a0.c — new file (19 lines)
@@ -0,0 +1,19 @@
+#include "test/jemalloc_test.h"
+
+TEST_BEGIN(test_a0)
+{
+	void *p;
+
+	p = a0malloc(1);
+	assert_ptr_not_null(p, "Unexpected a0malloc() error");
+	a0dalloc(p);
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test_no_malloc_init(
+	    test_a0));
+}
@@ -244,7 +244,6 @@ int
 main(void)
 {

-	assert(!config_fill || opt_junk_alloc || opt_junk_free);
 	return (test(
 	    test_junk_small,
 	    test_junk_large,
@@ -99,6 +99,11 @@ int
 main(void)
 {

+	/* Core tsd bootstrapping must happen prior to data_tsd_boot(). */
+	if (nallocx(1, 0) == 0) {
+		malloc_printf("Initialization error");
+		return (test_status_fail);
+	}
 	data_tsd_boot();

 	return (test(