#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char	*je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
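/*
 * 0 means "auto": the number of arenas is chosen automatically from the CPU
 * count during initialization.
 */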
unsigned	opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool	in_valgrind;

unsigned	ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
arena_t		**arenas;
static unsigned	narenas_total; /* Use narenas_total_*(). */
static arena_t	*a0; /* arenas[0]; read-only after initialization. */
unsigned	narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
static bool	malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_quarantine	= (1U << 2),
	flag_opt_zero		= (1U << 3),
	flag_opt_utrace		= (1U << 4),
	flag_in_valgrind	= (1U << 5),
	flag_opt_xmalloc	= (1U << 6)
};
static uint8_t	malloc_slow_flags;
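
/*
 * malloc_slow_flags is computed from the options above during bootstrap; the
 * fast paths then test the single malloc_slow boolean instead of checking
 * each option individually.
 */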

JEMALLOC_ALIGNED(CACHELINE)
const size_t	pind2sz_tab[NPSIZES] = {
#define	PSZ_yes(lg_grp, ndelta, lg_delta)				\
	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define	PSZ_no(lg_grp, ndelta, lg_delta)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	PSZ_##psz(lg_grp, ndelta, lg_delta)
	SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};
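
/*
 * index2size_tab maps each size class index to the class's size; the entries
 * are generated from the SIZE_CLASSES X-macro.
 */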
JEMALLOC_ALIGNED(CACHELINE)
const size_t	index2size_tab[NSIZES] = {
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
	SIZE_CLASSES
#undef SC
};
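
/*
 * size2index_tab maps ((size - 1) >> LG_TINY_MIN) to a size class index for
 * sizes small enough to use table lookup.  Each S2B_k(i) below expands to
 * 2^(k - LG_TINY_MIN) copies of i, one entry per LG_TINY_MIN-granule within
 * a size class whose lookup delta is 2^k bytes.
 */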
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define	S2B_0(i)	i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define	S2B_1(i)	i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define	S2B_2(i)	i,
#elif LG_TINY_MIN == 3
#define	S2B_3(i)	i,
#elif LG_TINY_MIN == 4
#define	S2B_4(i)	i,
#elif LG_TINY_MIN == 5
#define	S2B_5(i)	i,
#elif LG_TINY_MIN == 6
#define	S2B_6(i)	i,
#elif LG_TINY_MIN == 7
#define	S2B_7(i)	i,
#elif LG_TINY_MIN == 8
#define	S2B_8(i)	i,
#elif LG_TINY_MIN == 9
#define	S2B_9(i)	i,
#elif LG_TINY_MIN == 10
#define	S2B_10(i)	i,
#elif LG_TINY_MIN == 11
#define	S2B_11(i)	i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define	S2B_1(i)	S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define	S2B_2(i)	S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define	S2B_3(i)	S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define	S2B_4(i)	S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define	S2B_5(i)	S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define	S2B_6(i)	S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define	S2B_7(i)	S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define	S2B_8(i)	S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define	S2B_9(i)	S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define	S2B_10(i)	S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define	S2B_11(i)	S2B_10(i) S2B_10(i)
#endif
#define	S2B_no(i)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up chunk hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock.
	 * So we force an initialization of the lock in malloc_init_hard as
	 * well.  We don't try to care about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early
	 * in the process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized)
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
	init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
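
/*
 * Record format for FreeBSD-style utrace(2) allocation tracing; emitted by
 * the UTRACE() macro below.
 */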
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
2010-01-17 01:53:50 +08:00
|
|
|
/******************************************************************************/
|
2014-01-13 07:05:44 +08:00
|
|
|
/*
|
|
|
|
* Function prototypes for static functions that are referenced prior to
|
|
|
|
* definition.
|
|
|
|
*/
|
|
|
|
|
2016-05-08 03:42:31 +08:00
|
|
|
static bool malloc_init_hard_a0(void);
|
2010-01-17 01:53:50 +08:00
|
|
|
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && unlikely(opt_quarantine))
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

	if (unlikely(malloc_init_state == malloc_init_uninitialized))
		return (malloc_init_hard_a0());
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (unlikely(!malloc_initialized()) && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */
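
/*
 * a0ialloc() bootstraps arena 0 on demand and allocates from it with
 * TSDN_NULL, so no thread-specific data is read or written.
 */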
static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

	if (unlikely(malloc_init_a0()))
		return (NULL);

	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
	    is_metadata, arena_get(TSDN_NULL, 0, true), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

	idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}

arena_t *
a0get(void)
{

	return (a0);
}

void *
a0malloc(size_t size)
{

	return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

	a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and
 * very early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

	if (unlikely(size == 0))
		size = 1;

	return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

	if (unlikely(ptr == NULL))
		return;

	a0idalloc(ptr, false);
}
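
/*
 * The arenas array and narenas_total are read locklessly on fast paths, so
 * all updates go through these atomic helpers.
 */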
static void
arena_set(unsigned ind, arena_t *arena)
{

	atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

	atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

	atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

	return (atomic_read_u(&narenas_total));
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind > MALLOCX_ARENA_MAX)
		return (NULL);
	if (ind == narenas_total_get())
		narenas_total_inc();

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return (arena);
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind);
	arena_set(ind, arena);
	return (arena);
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind);
	malloc_mutex_unlock(tsdn, &arenas_lock);
	return (arena);
}
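
/*
 * arena_bind()/arena_unbind() associate a thread with an arena via TSD and
 * keep the per-arena thread counts in sync so that arena selection can
 * account for load.
 */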
static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	if (!tsd_nominal(tsd))
		return;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal)
		tsd_iarena_set(tsd, arena);
	else
		tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}
|
|
|
|
|
|
|
|
static void
|
2016-04-23 05:34:14 +08:00
|
|
|
arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
{
|
|
|
|
arena_t *arena;
|
|
|
|
|
2016-05-11 13:21:10 +08:00
|
|
|
arena = arena_get(tsd_tsdn(tsd), ind, false);
|
2016-04-23 05:34:14 +08:00
|
|
|
arena_nthreads_dec(arena, internal);
|
|
|
|
if (internal)
|
|
|
|
tsd_iarena_set(tsd, NULL);
|
|
|
|
else
|
|
|
|
tsd_arena_set(tsd, NULL);
|
Refactor/fix arenas manipulation.
Abstract arenas access to use arena_get() (or a0get() where appropriate)
rather than directly reading e.g. arenas[ind]. Prior to the addition of
the arenas.extend mallctl, the worst possible outcome of directly
accessing arenas was a stale read, but arenas.extend may allocate and
assign a new array to arenas.
Add a tsd-based arenas_cache, which amortizes arenas reads. This
introduces some subtle bootstrapping issues, with tsd_boot() now being
split into tsd_boot[01]() to support tsd wrapper allocation
bootstrapping, as well as an arenas_cache_bypass tsd variable which
dynamically terminates allocation of arenas_cache itself.
Promote a0malloc(), a0calloc(), and a0free() to be generally useful for
internal allocation, and use them in several places (more may be
appropriate).
Abstract arena->nthreads management and fix a missing decrement during
thread destruction (recent tsd refactoring left arenas_cleanup()
unused).
Change arena_choose() to propagate OOM, and handle OOM in all callers.
This is important for providing consistent allocation behavior when the
MALLOCX_ARENA() flag is being used. Prior to this fix, it was possible
for an OOM to result in allocation silently allocating from a different
arena than the one specified.
2014-10-08 14:14:57 +08:00
|
|
|
}
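
/*
 * Illustrative note (not part of the original source): arena_bind() and
 * arena_unbind() bracket a thread's association with an arena.  A thread is
 * typically bound lazily by arena_choose_hard() on its first allocation and
 * unbound from arena_cleanup()/iarena_cleanup() during thread destruction,
 * which keeps the per-arena nthreads counts balanced.
 */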

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
    arena_tdata_t *tdata, *arenas_tdata_old;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
    unsigned narenas_tdata_old, i;
    unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /*
     * Dissociate old tdata array (and set up for deallocation upon return)
     * if it's too small.
     */
    if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
        arenas_tdata_old = arenas_tdata;
        narenas_tdata_old = narenas_tdata;
        arenas_tdata = NULL;
        narenas_tdata = 0;
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    } else {
        arenas_tdata_old = NULL;
        narenas_tdata_old = 0;
    }

    /* Allocate tdata array if it's missing. */
    if (arenas_tdata == NULL) {
        bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
        narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
            *arenas_tdata_bypassp = true;
            arenas_tdata = (arena_tdata_t *)a0malloc(
                sizeof(arena_tdata_t) * narenas_tdata);
            *arenas_tdata_bypassp = false;
        }
        if (arenas_tdata == NULL) {
            tdata = NULL;
            goto label_return;
        }
        assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    }

    /*
     * Copy to tdata array.  It's possible that the actual number of arenas
     * has increased since narenas_total_get() was called above, but that
     * causes no correctness issues unless two threads concurrently execute
     * the arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */

    /* Copy/initialize tickers. */
    for (i = 0; i < narenas_actual; i++) {
        if (i < narenas_tdata_old) {
            ticker_copy(&arenas_tdata[i].decay_ticker,
                &arenas_tdata_old[i].decay_ticker);
        } else {
            ticker_init(&arenas_tdata[i].decay_ticker,
                DECAY_NTICKS_PER_UPDATE);
        }
    }
    if (narenas_tdata > narenas_actual) {
        memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
            * (narenas_tdata - narenas_actual));
    }

    /* Read the refreshed tdata array. */
    tdata = &arenas_tdata[ind];
label_return:
    if (arenas_tdata_old != NULL)
        a0dalloc(arenas_tdata_old);
    return (tdata);
}
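
/*
 * Illustrative note (not part of the original source): callers normally go
 * through the inlined arena_tdata_get(), which returns the cached per-thread
 * entry and only falls back to arena_tdata_get_hard() when the thread's tdata
 * array is missing or too small to cover the requested arena index.
 */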

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal)
{
    arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

    if (narenas_auto > 1) {
        unsigned i, j, choose[2], first_null;

        /*
         * Determine binding for both non-internal and internal
         * allocation.
         *
         *   choose[0]: For application allocation.
         *   choose[1]: For internal metadata allocation.
         */

        for (j = 0; j < 2; j++)
            choose[j] = 0;

        first_null = narenas_auto;
        malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
        assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                for (j = 0; j < 2; j++) {
                    if (arena_nthreads_get(arena_get(
                        tsd_tsdn(tsd), i, false), !!j) <
                        arena_nthreads_get(arena_get(
                        tsd_tsdn(tsd), choose[j], false),
                        !!j))
                        choose[j] = i;
                }
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        for (j = 0; j < 2; j++) {
            if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
                choose[j], false), !!j) == 0 || first_null ==
                narenas_auto) {
                /*
                 * Use an unloaded arena, or the least loaded
                 * arena if all arenas are already initialized.
                 */
                if (!!j == internal) {
                    ret = arena_get(tsd_tsdn(tsd),
                        choose[j], false);
                }
            } else {
                arena_t *arena;

                /* Initialize a new arena. */
                choose[j] = first_null;
                arena = arena_init_locked(tsd_tsdn(tsd),
                    choose[j]);
                if (arena == NULL) {
                    malloc_mutex_unlock(tsd_tsdn(tsd),
                        &arenas_lock);
                    return (NULL);
                }
                if (!!j == internal)
                    ret = arena;
            }
            arena_bind(tsd, choose[j], !!j);
        }
        malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
    } else {
        ret = arena_get(tsd_tsdn(tsd), 0, false);
        arena_bind(tsd, 0, false);
        arena_bind(tsd, 0, true);
    }

    return (ret);
}
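
/*
 * Illustrative example (not part of the original source): with
 * narenas_auto == 4 and per-arena application thread counts {3, 1, 2, 1},
 * the scan above selects arena 1 (the first arena with the lowest count);
 * if any arena slot were still NULL, that slot would be initialized via
 * arena_init_locked() and chosen instead.
 */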

void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
iarena_cleanup(tsd_t *tsd)
{
    arena_t *iarena;

    iarena = tsd_iarena_get(tsd);
    if (iarena != NULL)
        arena_unbind(tsd, iarena->ind, true);
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind, false);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
    arena_tdata_t *arenas_tdata;

    /* Prevent tsd->arenas_tdata from being (re)created. */
    *tsd_arenas_tdata_bypassp_get(tsd) = true;

    arenas_tdata = tsd_arenas_tdata_get(tsd);
    if (arenas_tdata != NULL) {
        tsd_arenas_tdata_set(tsd, NULL);
        a0dalloc(arenas_tdata);
    }
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        tsdn_t *tsdn;
        unsigned narenas, i;

        tsdn = tsdn_fetch();

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arena_get(tsdn, i, false);
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(tsdn, &arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tsdn, tcache, arena);
                }
                malloc_mutex_unlock(tsdn, &arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}
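
/*
 * Illustrative note (not part of the original source): this handler runs at
 * process exit only when the stats_print option is enabled, e.g. via
 * MALLOC_CONF="stats_print:true"; see the atexit() registration in
 * malloc_init_hard_a0_locked() below.
 */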

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

#  ifdef JEMALLOC_HAVE_ISSETUGID
    if (issetugid() != 0)
        return (NULL);
#  endif
    return (getenv(name));
}
#endif
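
/*
 * Illustrative note (not part of the original source): the fallback above
 * mirrors glibc's secure_getenv() semantics, returning NULL for
 * set-user-ID/set-group-ID processes (via issetugid() where available) so
 * that an attacker-supplied MALLOC_CONF environment variable cannot
 * influence the allocator in privileged binaries.
 */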

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
    /*
     * glibc >= 2.6 has the CPU_COUNT macro.
     *
     * glibc's sysconf() uses isspace().  glibc allocates for the first
     * time *before* setting up the isspace tables.  Therefore we need a
     * different method to get the number of CPUs.
     */
    {
        cpu_set_t set;

        pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
        result = CPU_COUNT(&set);
    }
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
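
/*
 * Illustrative example (not part of the original source): malloc_conf_next()
 * consumes one "key:value" pair per call from a comma-separated option
 * string, e.g.
 *
 *     MALLOC_CONF="narenas:4,lg_chunk:21,junk:false"
 *
 * yields ("narenas", "4"), then ("lg_chunk", "21"), then ("junk", "false").
 */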

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
    /*
     * Combine the runtime options into malloc_slow for fast path.  Called
     * after processing all the options.
     */
    malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
        | (opt_junk_free ? flag_opt_junk_free : 0)
        | (opt_quarantine ? flag_opt_quarantine : 0)
        | (opt_zero ? flag_opt_zero : 0)
        | (opt_utrace ? flag_opt_utrace : 0)
        | (opt_xmalloc ? flag_opt_xmalloc : 0);

    if (config_valgrind)
        malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

    malloc_slow = (malloc_slow_flags != 0);
}
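
/*
 * Illustrative note (not part of the original source): folding the individual
 * options into a single malloc_slow boolean lets the allocation fast path
 * test one flag instead of rechecking every slow-path option on each call.
 */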

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = "false";
            opt_junk_alloc = false;
            opt_junk_free = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }

    for (i = 0; i < 4; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            opts = config_malloc_conf;
            break;
        case 1:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 2: {
            ssize_t linklen = 0;
#ifndef _WIN32
            int saved_errno = errno;
            const char *linkname =
#  ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
                "/etc/malloc.conf"
#  endif
                ;

            /*
             * Try to use the contents of the "/etc/malloc.conf"
             * symbolic link's name.
             */
            linklen = readlink(linkname, buf, sizeof(buf) - 1);
            if (linklen == -1) {
                /* No configuration specified. */
                linklen = 0;
                /* Restore errno. */
                set_errno(saved_errno);
            }
#endif
            buf[linklen] = '\0';
            opts = buf;
            break;
        } case 3: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = secure_getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
            &vlen)) {
#define CONF_MATCH(n) \
    (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
    (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
            if (CONF_MATCH(n)) { \
                if (CONF_MATCH_VALUE("true")) \
                    o = true; \
                else if (CONF_MATCH_VALUE("false")) \
                    o = false; \
                else { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } \
                if (cont) \
                    continue; \
            }
#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
            if (CONF_MATCH(n)) { \
                uintmax_t um; \
                char *end; \
 \
                set_errno(0); \
                um = malloc_strtoumax(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (clip) { \
                    if ((min) != 0 && um < (min)) \
                        o = (t)(min); \
                    else if (um > (max)) \
                        o = (t)(max); \
                    else \
                        o = (t)um; \
                } else { \
                    if (((min) != 0 && um < (min)) \
                        || um > (max)) { \
                        malloc_conf_error( \
                            "Out-of-range " \
                            "conf value", \
                            k, klen, v, vlen); \
                    } else \
                        o = (t)um; \
                } \
                continue; \
            }
#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
            CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
            CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
            if (CONF_MATCH(n)) { \
                long l; \
                char *end; \
 \
                set_errno(0); \
                l = strtol(v, &end, 0); \
                if (get_errno() != 0 || (uintptr_t)end - \
                    (uintptr_t)v != vlen) { \
                    malloc_conf_error( \
                        "Invalid conf value", \
                        k, klen, v, vlen); \
                } else if (l < (ssize_t)(min) || l > \
                    (ssize_t)(max)) { \
                    malloc_conf_error( \
                        "Out-of-range conf value", \
                        k, klen, v, vlen); \
                } else \
                    o = l; \
                continue; \
            }
#define CONF_HANDLE_CHAR_P(o, n, d) \
            if (CONF_MATCH(n)) { \
                size_t cpylen = (vlen <= \
                    sizeof(o)-1) ? vlen : \
                    sizeof(o)-1; \
                strncpy(o, v, cpylen); \
                o[cpylen] = '\0'; \
                continue; \
            }

            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones.  In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
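            /*
             * Illustrative arithmetic (not part of the original
             * source): with LG_PAGE = 12 and LG_SIZE_CLASS_GROUP = 2
             * (typical values), the minimum accepted lg_chunk is
             * 12 + 2 + 2 = 16 (64 KiB chunks) when fill support is
             * compiled in, and 12 + 2 + 1 = 15 (32 KiB) otherwise.
             */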
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss =
                                dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
                UINT_MAX, false)
            if (strncmp("purge", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < purge_mode_limit; i++) {
                    if (strncmp(purge_mode_names[i], v,
                        vlen) == 0) {
                        opt_purge = (purge_mode_t)i;
                        match = true;
                        break;
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
                NSTIME_SEC_MAX);
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
            if (config_fill) {
                if (CONF_MATCH("junk")) {
                    if (CONF_MATCH_VALUE("true")) {
                        if (config_valgrind &&
                            unlikely(in_valgrind)) {
                            malloc_conf_error(
                                "Deallocation-time "
                                "junk filling cannot "
                                "be enabled while "
                                "running inside "
                                "Valgrind", k, klen, v,
                                vlen);
                        } else {
                            opt_junk = "true";
                            opt_junk_alloc = true;
                            opt_junk_free = true;
                        }
                    } else if (CONF_MATCH_VALUE("false")) {
                        opt_junk = "false";
                        opt_junk_alloc = opt_junk_free =
                            false;
                    } else if (CONF_MATCH_VALUE("alloc")) {
                        opt_junk = "alloc";
                        opt_junk_alloc = true;
                        opt_junk_free = false;
                    } else if (CONF_MATCH_VALUE("free")) {
                        if (config_valgrind &&
                            unlikely(in_valgrind)) {
                            malloc_conf_error(
                                "Deallocation-time "
                                "junk filling cannot "
                                "be enabled while "
                                "running inside "
                                "Valgrind", k, klen, v,
                                vlen);
                        } else {
                            opt_junk = "free";
                            opt_junk_alloc = false;
                            opt_junk_free = true;
                        }
                    } else {
                        malloc_conf_error(
                            "Invalid conf value", k,
                            klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
                CONF_HANDLE_BOOL(opt_zero, "zero", true)
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache",
                    !config_valgrind || !in_valgrind)
                if (CONF_MATCH("tcache")) {
                    assert(config_valgrind && in_valgrind);
                    if (opt_tcache) {
                        opt_tcache = false;
                        malloc_conf_error(
                            "tcache cannot be enabled "
                            "while running inside Valgrind",
                            k, klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof", true)
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
                    true)
                CONF_HANDLE_BOOL(opt_prof_thread_active_init,
                    "prof_thread_active_init", true)
                CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1, true)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
                    true)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
                    true)
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
                    true)
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
                    true)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
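
/*
 * Illustrative note (not part of the original source): the four loop
 * iterations above apply option sources in increasing priority: the
 * compile-time config string, the je_malloc_conf global, the name of the
 * /etc/malloc.conf symbolic link, and finally the MALLOC_CONF environment
 * variable, so later sources override earlier ones for the same key.
 */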

static bool
malloc_init_hard_needed(void)
{

    if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
        malloc_init_recursible)) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        spin_t spinner;

        /* Busy-wait until the initializing thread completes. */
        spin_init(&spinner);
        do {
            malloc_mutex_unlock(TSDN_NULL, &init_lock);
            spin_adaptive(&spinner);
            malloc_mutex_lock(TSDN_NULL, &init_lock);
        } while (!malloc_initialized());
        return (false);
    }
#endif
    return (true);
}

static bool
malloc_init_hard_a0_locked(void)
{

	malloc_initializer = INITIALIZER;

	if (config_prof)
		prof_boot0();
	malloc_conf_init();
	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}
	pages_boot();
	if (base_boot())
		return (true);
	if (chunk_boot())
		return (true);
	if (ctl_boot())
		return (true);
	if (config_prof)
		prof_boot1();
	arena_boot();
	if (config_tcache && tcache_boot(TSDN_NULL))
		return (true);
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
		return (true);
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	narenas_total_set(narenas_auto);
	arenas = &a0;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0) == NULL)
		return (true);

	malloc_init_state = malloc_init_a0_initialized;

	return (false);
}
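
/*
 * Usage note (illustrative; the option names below are real but the
 * program is hypothetical): malloc_conf_init() above merges option
 * strings from several sources, including a symbol the application may
 * define and the MALLOC_CONF environment variable, e.g.:
 *
 *	const char *malloc_conf = "narenas:4,stats_print:true";
 *
 * or, equivalently at run time:
 *
 *	MALLOC_CONF=stats_print:true ./a.out
 */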

static bool
malloc_init_hard_a0(void)
{
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return (ret);
}

/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void)
{

	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
		return (true);
	}
#endif

	return (false);
}

static bool
malloc_init_hard_finish(tsdn_t *tsdn)
{

	if (malloc_mutex_boot())
		return (true);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto > MALLOCX_ARENA_MAX) {
		narenas_auto = MALLOCX_ARENA_MAX;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
	    (MALLOCX_ARENA_MAX+1));
	if (arenas == NULL)
		return (true);
	/* Copy the pointer to the one arena that was already initialized. */
	arena_set(0, a0);

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return (false);
}
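
/*
 * Worked example (comment only): with opt_narenas left at 0 on a 16-CPU
 * system, malloc_init_hard_finish() selects ncpus << 2 == 64 arenas,
 * which is then clamped to MALLOCX_ARENA_MAX only if it exceeds the
 * MALLOCX_ARENA() flag encoding range.
 */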

static bool
malloc_init_hard(void)
{
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);
	if (!malloc_init_hard_needed()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (false);
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (true);
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL)
		return (true);
	if (malloc_init_hard_recursible())
		return (true);
	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);

	if (config_prof && prof_boot2(tsd)) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
	malloc_tsd_boot1();
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
    prof_tctx_t *tctx, bool slow_path)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		szind_t ind_large = size2index(LARGE_MINCLASS);
		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ialloc(tsd, usize, ind, zero, slow_path);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
	else
		p = ialloc(tsd, usize, ind, zero, slow_path);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

/*
 * ialloc_body() is inlined so that fast and slow paths are generated separately
 * with statically known slow_path.
 *
 * This function guarantees that *tsdn is non-NULL on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;
	szind_t ind;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	ind = size2index(size);
	if (unlikely(ind >= NSIZES))
		return (NULL);

	if (config_stats || (config_prof && opt_prof) || (slow_path &&
	    config_valgrind && unlikely(in_valgrind))) {
		*usize = index2size(ind);
		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
	}

	if (config_prof && opt_prof)
		return (ialloc_prof(tsd, *usize, ind, zero, slow_path));

	return (ialloc(tsd, size, ind, zero, slow_path));
}
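
/*
 * Illustrative sketch (hypothetical names, not part of jemalloc):
 * because ialloc_body() is always inlined and slow_path is a
 * compile-time constant at every call site, the compiler emits two
 * specialized bodies and folds away the dead branches, as in:
 */
#if 0
#include <stdbool.h>

static inline int
body(int x, bool slow_path)
{

	if (slow_path && x < 0)	/* Eliminated when slow_path == false. */
		return (-1);
	return (x + 1);
}

int fast(int x) { return (body(x, false)); }	/* No check emitted. */
int slow(int x) { return (body(x, true)); }	/* Check retained. */
#endif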

JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
    bool update_errno, bool slow_path)
{

	assert(!tsdn_null(tsdn) || ret == NULL);

	if (unlikely(ret == NULL)) {
		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_printf("<jemalloc>: Error in %s(): out of "
			    "memory\n", func);
			abort();
		}
		if (update_errno)
			set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(tsdn, ret, config_prof));
		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
	}
	witness_assert_lockless(tsdn);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	if (likely(!malloc_slow)) {
		ret = ialloc_body(size, false, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
	} else {
		ret = ialloc_body(size, false, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
		UTRACE(0, size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
	}

	return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ipalloc(tsd, usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
	else
		p = ipalloc(tsd, usize, alignment, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	tsd_t *tsd;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (unlikely(malloc_init())) {
		tsd = NULL;
		result = NULL;
		goto label_oom;
	}
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (size == 0)
		size = 1;

	/* Make sure that alignment is a large enough power of 2. */
	if (unlikely(((alignment - 1) & alignment) != 0
	    || (alignment < min_alignment))) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error allocating "
			    "aligned memory: invalid alignment\n");
			abort();
		}
		result = NULL;
		ret = EINVAL;
		goto label_return;
	}

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
		result = NULL;
		goto label_oom;
	}

	if (config_prof && opt_prof)
		result = imemalign_prof(tsd, alignment, usize);
	else
		result = ipalloc(tsd, usize, alignment, false);
	if (unlikely(result == NULL))
		goto label_oom;
	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && likely(result != NULL)) {
		assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, result);
	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
	    false);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	witness_assert_lockless(tsd_tsdn(tsd));
	goto label_return;
}
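
/*
 * Worked example (comment only): the ((alignment - 1) & alignment) test
 * above is nonzero exactly when alignment is not a power of two:
 * alignment == 64 gives 63 & 64 == 0 (accepted), whereas alignment == 48
 * gives 47 & 48 == 32 (rejected with EINVAL).
 */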

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;

	ret = imemalign(memptr, alignment, size, sizeof(void *));

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
		ret = NULL;
		set_errno(err);
	}

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else
			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num)))
		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */

	if (likely(!malloc_slow)) {
		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
	} else {
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);
}
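
/*
 * Worked example (comment only): on a typical 64-bit system,
 * sizeof(size_t) << 2 == 32, so SIZE_T_MAX << 32 masks the upper half of
 * each operand.  For num == (1 << 20) and size == 4096, both operands
 * fit in the low 32 bits, the product cannot wrap, and the division is
 * skipped; num_size / size != num is consulted only when an operand has
 * high bits set.
 */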

static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (likely(!slow_path))
		iqalloc(tsd, ptr, tcache, false);
	else {
		if (config_valgrind && unlikely(in_valgrind))
			rzsize = p2rz(tsd_tsdn(tsd), ptr);
		iqalloc(tsd, ptr, tcache, true);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(tsd_tsdn(tsd), ptr);
	isqalloc(tsd, ptr, usize, tcache, slow_path);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			tsd_t *tsd;

			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false), true);
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		tsd_t *tsd;

		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		witness_assert_lockless(tsd_tsdn(tsd));

		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind)) {
			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
			    u2rz(old_usize);
		}

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (likely(!malloc_slow))
			ret = ialloc_body(size, false, &tsdn, &usize, false);
		else
			ret = ialloc_body(size, false, &tsdn, &usize, true);
		assert(!tsdn_null(tsdn) || ret == NULL);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret, config_prof));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
	    old_rzsize, true, false);
	witness_assert_lockless(tsdn);
	return (ret);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		witness_assert_lockless(tsd_tsdn(tsd));
		if (likely(!malloc_slow))
			ifree(tsd, ptr, tcache_get(tsd, false), false);
		else
			ifree(tsd, ptr, tcache_get(tsd, false), true);
		witness_assert_lockless(tsd_tsdn(tsd));
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)
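
/*
 * Expansion trace (comment only): when jemalloc_defs.h contains
 * "#define je_malloc malloc", is_malloc(je_malloc) expands via
 * is_malloc_(malloc) to malloc_is_malloc, which is defined above as 1.
 * Without that define it expands to malloc_is_je_malloc, an undefined
 * identifier that evaluates to 0 in the #if below.
 */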

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif

#ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
#define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#define PREALIAS(je_fn)	ALIAS(je_fn)
void	*__libc_malloc(size_t size) PREALIAS(je_malloc);
void	__libc_free(void* ptr) PREALIAS(je_free);
void	*__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void	*__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void	*__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void	*__libc_valloc(size_t size) PREALIAS(je_valloc);
int	__posix_memalign(void** r, size_t a, size_t s)
    PREALIAS(je_posix_memalign);
#undef PREALIAS
#undef ALIAS
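
/*
 * Expansion trace (comment only): PREALIAS(je_malloc) expands its
 * argument before forwarding, so when je_malloc is defined to malloc,
 * ALIAS() receives the token malloc and #je_fn stringifies to "malloc".
 * Invoking ALIAS(je_malloc) directly would stringify to "je_malloc",
 * because # suppresses argument expansion.
 */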

#endif

#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}
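
/*
 * Illustrative usage (not part of this file): the flags decoded above
 * are composed by callers of the non-standard API, e.g.:
 *
 *	void *p = mallocx(4096,
 *	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
 *
 * which requests a 64-byte-aligned, zeroed allocation that bypasses the
 * thread cache.
 */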

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	szind_t ind;

	if (unlikely(alignment != 0))
		return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
	ind = size2index(usize);
	assert(ind < NSIZES);
	return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
	    slow_path));
}

static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
		    tcache, arena, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsdn, p, usize);
	} else {
		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
		    slow_path);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
	    arena, slow_path);
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));

	return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
	tsdn_t *tsdn;
	void *p;
	size_t usize;

	assert(size != 0);

	if (likely(!malloc_slow)) {
		p = imallocx_body(size, flags, &tsdn, &usize, false);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
	} else {
		p = imallocx_body(size, flags, &tsdn, &usize, true);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
		UTRACE(0, size, p);
		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
		    MALLOCX_ZERO_GET(flags));
	}

	return (p);
}
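
/*
 * Caller-side sketch (illustrative, not part of this file; assumes
 * <jemalloc/jemalloc.h> with the un-prefixed public names): mallocx()
 * composes its behavior entirely from the flags argument decoded above.
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	if (p == NULL)
 *		handle_oom();	// Hypothetical error handler.
 *	dallocx(p, 0);
 */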

static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}
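
/*
 * Caller-side sketch (illustrative, not part of this file): unlike
 * realloc(), rallocx() leaves the original allocation intact on failure,
 * so no temporary-pointer dance is needed to avoid a leak.
 *
 *	void *q = rallocx(p, newsize, 0);
 *	if (q == NULL) {
 *		// p is still valid and unchanged.
 *	} else
 *		p = q;
 */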

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}
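
/*
 * Caller-side sketch (illustrative, not part of this file): xallocx()
 * resizes strictly in place and returns the resulting usable size; a result
 * smaller than the requested size means the resize did not fully succeed,
 * and the caller may fall back to rallocx() if relocation is acceptable.
 *
 *	if (xallocx(p, newsize, 0, 0) < newsize) {
 *		// In-place growth failed; p is unchanged.
 *	}
 */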

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}
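
/*
 * Caller-side sketch (illustrative, not part of this file): sallocx()
 * reports the usable size backing an allocation, which may exceed the
 * requested size due to size-class rounding.
 *
 *	void *p = mallocx(100, 0);
 *	size_t usable = sallocx(p, 0);	// e.g. 112 with default size classes.
 *	assert(usable >= 100);
 */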

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}
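
/*
 * Caller-side sketch (illustrative, not part of this file): the tcache
 * decoding above is what lets a caller bypass the thread cache for an
 * individual deallocation.
 *
 *	dallocx(p, MALLOCX_TCACHE_NONE);	// Free directly to the arena.
 *	dallocx(q, 0);				// Use the thread's tcache.
 */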

JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}
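
/*
 * Caller-side sketch (illustrative, not part of this file): sdallocx() is
 * the sized variant of dallocx(); the size argument must round to the same
 * size class as the original request, which the inallocx()/isalloc()
 * assertion above checks in debug builds.
 *
 *	void *p = mallocx(n, 0);
 *	...
 *	sdallocx(p, n, 0);
 */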

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}
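
/*
 * Caller-side sketch (illustrative, not part of this file): nallocx()
 * performs the same size computation as mallocx() without allocating, so a
 * caller can size a buffer to a full size class up front.
 *
 *	size_t usable = nallocx(len, 0);	// 0 if len is unsupported.
 *	void *buf = (usable != 0) ? mallocx(usable, 0) : NULL;
 */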

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}
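
/*
 * Caller-side sketch (illustrative, not part of this file): the usual read
 * pattern for the mallctl namespace.  "epoch" must be advanced first so
 * that subsequently read statistics are refreshed.
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */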

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;
	tsdn_t *tsdn;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
	witness_assert_lockless(tsdn);
	return (ret);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}
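
/*
 * Illustrative sketch, not part of this file: the unprefixed public names
 * below correspond to the je_*() definitions above.  The documented MIB
 * pattern is to translate a name once with mallctlnametomib(), then
 * substitute MIB components and query repeatedly via mallctlbymib(), which
 * avoids the name lookup on each call.  Assumes <jemalloc/jemalloc.h>.
 *
 *	unsigned i, nbins;
 *	size_t mib[4], len, miblen;
 *
 *	len = sizeof(nbins);
 *	mallctl("arenas.nbins", &nbins, &len, NULL, 0);
 *
 *	miblen = 4;
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size;
 *
 *		mib[2] = i;	// Overwrite the index component of the MIB.
 *		len = sizeof(bin_size);
 *		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *		// bin_size now holds the size class of bin i.
 *	}
 */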

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	stats_print(write_cb, cbopaque, opts);
	witness_assert_lockless(tsdn);
}
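
/*
 * Illustrative sketch, not part of this file: stats output is routed through
 * write_cb with cbopaque passed through verbatim; a NULL write_cb falls back
 * to the default output (stderr).  The callback and file name below are
 * hypothetical.  Assumes <stdio.h> and <jemalloc/jemalloc.h>.
 *
 *	static void
 *	write_to_file(void *cbopaque, const char *s)
 *	{
 *		fputs(s, (FILE *)cbopaque);
 *	}
 *
 *	FILE *f = fopen("jemalloc.stats", "w");
 *	if (f != NULL) {
 *		// "gbl" omits general info and per-size-class detail.
 *		malloc_stats_print(write_to_file, f, "gbl");
 *		fclose(f);
 *	}
 */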

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}
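
/*
 * Illustrative sketch, not part of this file: the return value reflects
 * size-class rounding, so it can exceed the requested size, and the full
 * usable extent may be written safely; a NULL pointer yields 0.  Assumes
 * <stdlib.h>, <string.h>, and <jemalloc/jemalloc.h>.
 *
 *	void *p = malloc(17);
 *	if (p != NULL) {
 *		size_t usable = malloc_usable_size(p);
 *		// usable >= 17 (e.g. 32 with the default size classes).
 *		memset(p, 0, usable);
 *		free(p);
 *	}
 */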

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}
#endif
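
/*
 * For reference, a hedged sketch of the registration mentioned above: the
 * actual call lives in the initialization path elsewhere in this file, and is
 * skipped on platforms that use JEMALLOC_MUTEX_INIT_CB or other mechanisms,
 * but it is essentially:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * so jemalloc_prefork() runs in the parent immediately before fork(2), and
 * the postfork hooks release the allocator's mutexes in the parent and the
 * child once fork(2) returns.
 */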

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd);
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	prof_prefork0(tsd_tsdn(tsd));
	for (i = 0; i < 3; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	base_prefork(tsd_tsdn(tsd));
	for (i = 0; i < narenas; i++) {
		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_prefork3(tsd_tsdn(tsd), arena);
	}
	prof_prefork1(tsd_tsdn(tsd));
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_parent(tsd_tsdn(tsd), arena);
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void)
{
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_child(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_child(tsd_tsdn(tsd), arena);
	}
	prof_postfork_child(tsd_tsdn(tsd));
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_child(tsd_tsdn(tsd));
}

/******************************************************************************/