#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization (arenas, narenas_total). */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests. Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas. arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
static arena_t **arenas;
static unsigned narenas_total;
static arena_t *a0; /* arenas[0]; read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized = 3,
	malloc_init_a0_initialized = 2,
	malloc_init_recursible = 1,
	malloc_init_initialized = 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
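
/*
 * Illustrative note (not part of the original source): the enum above is
 * ordered so that the common fully-initialized case compares equal to zero
 * (hence the "jnz" hint), and initialization presumably progresses roughly
 *
 *	uninitialized --> a0_initialized --> recursible --> initialized
 *
 * i.e. arena 0 and the base allocator come up first, then recursive
 * allocation (e.g. for TSD) becomes safe, and finally the full runtime is
 * ready.  The exact transitions live in malloc_init_hard*().
 */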

JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
	SIZE_CLASSES
#undef SC
};
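
/*
 * Illustrative note (not part of the original source): each SC() entry above
 * computes its class size as (1 << lg_grp) + (ndelta << lg_delta).  For a
 * hypothetical class with lg_grp == 6, lg_delta == 4 and ndelta == 1, the
 * resulting table entry would be 64 + 16 == 80 bytes.
 */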

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
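
/*
 * Illustrative note (not part of the original source): each S2B_k(i) macro
 * above expands to 2^(k - LG_TINY_MIN) copies of index i, so size2index_tab
 * ends up with one entry per LG_TINY_MIN-sized quantum covered by the lookup
 * table.  A lookup along the lines of
 *
 *	index = size2index_tab[(size - 1) >> LG_TINY_MIN];
 *
 * (the exact expression lives in the inline headers, not in this file) then
 * maps a small request size to its size class index in O(1).
 */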

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void *p;	/* Input pointer (as in realloc(p, s)). */
	size_t s;	/* Request size. */
	void *r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
	if (unlikely(opt_utrace)) { \
		int utrace_serrno = errno; \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
		errno = utrace_serrno; \
	} \
} while (0)
#else
# define UTRACE(a, b, c)
#endif
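
/*
 * Illustrative note (not part of the original source): UTRACE() is intended
 * to bracket user-visible allocation events when the utrace option is
 * enabled; e.g. a hypothetical wrapper might do
 *
 *	ret = allocate(size);
 *	UTRACE(0, size, ret);		malloc(size) returned ret
 *	...
 *	UTRACE(ptr, 0, 0);		free(ptr)
 *
 * (the real call sites are in the public entry points, not shown in this
 * section).  errno is saved and restored inside the macro so that tracing
 * never perturbs the errno value observed by the caller.
 */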

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption. The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && unlikely(opt_quarantine))
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

	if (unlikely(malloc_init_state == malloc_init_uninitialized))
		return (malloc_init_hard_a0());
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (unlikely(!malloc_initialized()) && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}
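
/*
 * Illustrative note (not part of the original source): malloc_init() is the
 * cheap fast-path check; it presumably guards the public allocation entry
 * points and only falls through to malloc_init_hard() on the first call (or
 * while initialization is still in progress), e.g.
 *
 *	if (malloc_init())
 *		return (NULL);	initialization failed; fail the allocation
 */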

/*
 * The a0*() functions are used instead of i[mcd]alloc() in situations that
 * cannot tolerate TLS variable access.
 */

arena_t *
a0get(void)
{

	assert(a0 != NULL);
	return (a0);
}

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

	if (unlikely(malloc_init_a0()))
		return (NULL);

	return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

	idalloctm(NULL, ptr, false, is_metadata);
}

void *
a0malloc(size_t size)
{

	return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

	a0idalloc(ptr, true);
}
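
/*
 * Illustrative note (not part of the original source): a0malloc()/a0dalloc()
 * give internal consumers malloc/free-style semantics that always route
 * through arena 0 and are accounted as metadata (is_metadata == true), which
 * makes them usable during bootstrap, before TSD and the full arena
 * machinery are available.
 */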

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

	if (unlikely(size == 0))
		size = 1;

	return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

	if (unlikely(ptr == NULL))
		return;

	a0idalloc(ptr, false);
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(unsigned ind)
{
	arena_t *arena;

	/* Expand arenas if necessary. */
	assert(ind <= narenas_total);
	if (ind > MALLOCX_ARENA_MAX)
		return (NULL);
	if (ind == narenas_total) {
		unsigned narenas_new = narenas_total + 1;
		arena_t **arenas_new =
		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
		    sizeof(arena_t *)));
		if (arenas_new == NULL)
			return (NULL);
		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
		arenas_new[ind] = NULL;
		/*
		 * Deallocate only if arenas came from a0malloc() (not
		 * base_alloc()).
		 */
		if (narenas_total != narenas_auto)
			a0dalloc(arenas);
		arenas = arenas_new;
		narenas_total = narenas_new;
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arenas[ind];
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return (arena);
	}

	/* Actually initialize the arena. */
	arena = arenas[ind] = arena_new(ind);
	return (arena);
}

arena_t *
arena_init(unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(&arenas_lock);
	arena = arena_init_locked(ind);
	malloc_mutex_unlock(&arenas_lock);
	return (arena);
}

unsigned
narenas_total_get(void)
{
	unsigned narenas;

	malloc_mutex_lock(&arenas_lock);
	narenas = narenas_total;
	malloc_mutex_unlock(&arenas_lock);

	return (narenas);
}

static void
arena_bind_locked(tsd_t *tsd, unsigned ind)
{
	arena_t *arena;

	arena = arenas[ind];
	arena->nthreads++;

	if (tsd_nominal(tsd))
		tsd_arena_set(tsd, arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind)
{

	malloc_mutex_lock(&arenas_lock);
	arena_bind_locked(tsd, ind);
	malloc_mutex_unlock(&arenas_lock);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
	arena_t *oldarena, *newarena;

	malloc_mutex_lock(&arenas_lock);
	oldarena = arenas[oldind];
	newarena = arenas[newind];
	oldarena->nthreads--;
	newarena->nthreads++;
	malloc_mutex_unlock(&arenas_lock);
	tsd_arena_set(tsd, newarena);
}

unsigned
arena_nbound(unsigned ind)
{
	unsigned nthreads;

	malloc_mutex_lock(&arenas_lock);
	nthreads = arenas[ind]->nthreads;
	malloc_mutex_unlock(&arenas_lock);
	return (nthreads);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(&arenas_lock);
	arena = arenas[ind];
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
	tsd_arena_set(tsd, NULL);
}

arena_t *
arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
{
	arena_t *arena;
	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
	unsigned narenas_cache = tsd_narenas_cache_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/* Deallocate old cache if it's too small. */
	if (arenas_cache != NULL && narenas_cache < narenas_actual) {
		a0dalloc(arenas_cache);
		arenas_cache = NULL;
		narenas_cache = 0;
		tsd_arenas_cache_set(tsd, arenas_cache);
		tsd_narenas_cache_set(tsd, narenas_cache);
	}

	/* Allocate cache if it's missing. */
	if (arenas_cache == NULL) {
		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
		assert(ind < narenas_actual || !init_if_missing);
		narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (!*arenas_cache_bypassp) {
			*arenas_cache_bypassp = true;
			arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
			    narenas_cache);
			*arenas_cache_bypassp = false;
		} else
			arenas_cache = NULL;
		if (arenas_cache == NULL) {
			/*
			 * This function must always tell the truth, even if
			 * it's slow, so don't let OOM or recursive allocation
			 * avoidance (note arenas_cache_bypass check) get in the
			 * way.
			 */
			if (ind >= narenas_actual)
				return (NULL);
			malloc_mutex_lock(&arenas_lock);
			arena = arenas[ind];
			malloc_mutex_unlock(&arenas_lock);
			return (arena);
		}
		tsd_arenas_cache_set(tsd, arenas_cache);
		tsd_narenas_cache_set(tsd, narenas_cache);
	}

	/*
	 * Copy to cache. It's possible that the actual number of arenas has
	 * increased since narenas_total_get() was called above, but that causes
	 * no correctness issues unless two threads concurrently execute the
	 * arenas.extend mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */
	malloc_mutex_lock(&arenas_lock);
	memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
	malloc_mutex_unlock(&arenas_lock);
	if (narenas_cache > narenas_actual) {
		memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
		    (narenas_cache - narenas_actual));
	}

	/* Read the refreshed cache, and init the arena if necessary. */
	arena = arenas_cache[ind];
	if (init_if_missing && arena == NULL)
		arena = arenas_cache[ind] = arena_init(ind);
	return (arena);
}
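
/*
 * Illustrative note (not part of the original source): the tsd-based
 * arenas_cache above amortizes reads of the global arenas array, which may be
 * reallocated by the arenas.extend mallctl.  The arenas_cache_bypass flag
 * keeps allocation of the cache itself from recursing back into this path,
 * and when the cache cannot be (re)allocated the function falls back to
 * reading arenas[] under arenas_lock so callers still get a truthful answer.
 */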

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(a0get() != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arenas[i] != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arenas[i]->nthreads <
                    arenas[choose]->nthreads)
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arenas[choose]->nthreads == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arenas[choose];
        } else {
            /* Initialize a new arena. */
            choose = first_null;
            ret = arena_init_locked(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        arena_bind_locked(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = a0get();
        arena_bind(tsd, 0);
    }

    return (ret);
}
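
/*
 * Selection policy sketch (informal summary of arena_choose_hard() above):
 * prefer an already-initialized arena with zero assigned threads; otherwise
 * initialize the first uninitialized slot; if every slot is initialized, fall
 * back to the least-loaded arena.  With narenas_auto == 1 the thread is
 * simply bound to arena 0 (a0).
 */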

void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_cache_cleanup(tsd_t *tsd)
{
    arena_t **arenas_cache;

    arenas_cache = tsd_arenas_cache_get(tsd);
    if (arenas_cache != NULL)
        a0dalloc(arenas_cache);
}

void
narenas_cache_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_cache_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arenas[i];
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}
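
/*
 * Usage sketch (assumes a shell environment; the option spelling comes from
 * the "stats_print" conf key handled in malloc_conf_init() below):
 *
 *     MALLOC_CONF=stats_print:true ./a.out
 *
 * causes stats_print_atexit() to be registered via atexit() during
 * initialization, so merged statistics are printed at process exit.
 */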

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
# ifdef JEMALLOC_HAVE_ISSETUGID
static char *
secure_getenv(const char *name)
{

    if (issetugid() == 0)
        return (getenv(name));
    else
        return (NULL);
}
# else
static char *
secure_getenv(const char *name)
{

    return (getenv(name));
}
# endif
#endif

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}
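
/*
 * Tokenization sketch for malloc_conf_next() (illustrative input, not taken
 * from the source): given opts == "abort:true,narenas:4", the first call
 * yields k = "abort" (klen 5), v = "true" (vlen 4) and advances *opts_p past
 * the comma; the second call yields k = "narenas", v = "4".  Each successful
 * pair returns false; an options string that ends in the middle of a key, or
 * with a trailing comma, triggers a malloc_write() diagnostic.
 */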

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = "false";
            opt_junk_alloc = false;
            opt_junk_free = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }

    for (i = 0; i < 3; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 1: {
            int linklen = 0;
#ifndef _WIN32
            int saved_errno = errno;
            const char *linkname =
# ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
                "/etc/malloc.conf"
# endif
                ;

            /*
             * Try to use the contents of the "/etc/malloc.conf"
             * symbolic link's name.
             */
            linklen = readlink(linkname, buf, sizeof(buf) - 1);
            if (linklen == -1) {
                /* No configuration specified. */
                linklen = 0;
                /* Restore errno. */
                set_errno(saved_errno);
            }
#endif
            buf[linklen] = '\0';
            opts = buf;
            break;
        } case 2: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = secure_getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
            &vlen)) {
#define CONF_MATCH(n)                                                   \
    (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n)                                             \
    (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont)                                    \
            if (CONF_MATCH(n)) {                                        \
                if (CONF_MATCH_VALUE("true"))                           \
                    o = true;                                           \
                else if (CONF_MATCH_VALUE("false"))                     \
                    o = false;                                          \
                else {                                                  \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                }                                                       \
                if (cont)                                               \
                    continue;                                           \
            }
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip)                        \
            if (CONF_MATCH(n)) {                                        \
                uintmax_t um;                                           \
                char *end;                                              \
                                                                        \
                set_errno(0);                                           \
                um = malloc_strtoumax(v, &end, 0);                      \
                if (get_errno() != 0 || (uintptr_t)end -                \
                    (uintptr_t)v != vlen) {                             \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                } else if (clip) {                                      \
                    if ((min) != 0 && um < (min))                       \
                        o = (min);                                      \
                    else if (um > (max))                                \
                        o = (max);                                      \
                    else                                                \
                        o = um;                                         \
                } else {                                                \
                    if (((min) != 0 && um < (min))                      \
                        || um > (max)) {                                \
                        malloc_conf_error(                              \
                            "Out-of-range "                             \
                            "conf value",                               \
                            k, klen, v, vlen);                          \
                    } else                                              \
                        o = um;                                         \
                }                                                       \
                continue;                                               \
            }
#define CONF_HANDLE_SSIZE_T(o, n, min, max)                             \
            if (CONF_MATCH(n)) {                                        \
                long l;                                                 \
                char *end;                                              \
                                                                        \
                set_errno(0);                                           \
                l = strtol(v, &end, 0);                                 \
                if (get_errno() != 0 || (uintptr_t)end -                \
                    (uintptr_t)v != vlen) {                             \
                    malloc_conf_error(                                  \
                        "Invalid conf value",                           \
                        k, klen, v, vlen);                              \
                } else if (l < (ssize_t)(min) || l >                    \
                    (ssize_t)(max)) {                                   \
                    malloc_conf_error(                                  \
                        "Out-of-range conf value",                      \
                        k, klen, v, vlen);                              \
                } else                                                  \
                    o = l;                                              \
                continue;                                               \
            }
#define CONF_HANDLE_CHAR_P(o, n, d)                                     \
            if (CONF_MATCH(n)) {                                        \
                size_t cpylen = (vlen <=                                \
                    sizeof(o)-1) ? vlen :                               \
                    sizeof(o)-1;                                        \
                strncpy(o, v, cpylen);                                  \
                o[cpylen] = '\0';                                       \
                continue;                                               \
            }

            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones.  In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss =
                                dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
                SIZE_T_MAX, false)
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
            if (config_fill) {
                if (CONF_MATCH("junk")) {
                    if (CONF_MATCH_VALUE("true")) {
                        opt_junk = "true";
                        opt_junk_alloc = opt_junk_free =
                            true;
                    } else if (CONF_MATCH_VALUE("false")) {
                        opt_junk = "false";
                        opt_junk_alloc = opt_junk_free =
                            false;
                    } else if (CONF_MATCH_VALUE("alloc")) {
                        opt_junk = "alloc";
                        opt_junk_alloc = true;
                        opt_junk_free = false;
                    } else if (CONF_MATCH_VALUE("free")) {
                        opt_junk = "free";
                        opt_junk_alloc = false;
                        opt_junk_free = true;
                    } else {
                        malloc_conf_error(
                            "Invalid conf value", k,
                            klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
                CONF_HANDLE_BOOL(opt_zero, "zero", true)
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache",
                    !config_valgrind || !in_valgrind)
                if (CONF_MATCH("tcache")) {
                    assert(config_valgrind && in_valgrind);
                    if (opt_tcache) {
                        opt_tcache = false;
                        malloc_conf_error(
                            "tcache cannot be enabled "
                            "while running inside Valgrind",
                            k, klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof", true)
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
                    true)
                CONF_HANDLE_BOOL(opt_prof_thread_active_init,
                    "prof_thread_active_init", true)
                CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1, true)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
                    true)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
                    true)
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
                    true)
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
                    true)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}
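
/*
 * Example configuration strings accepted by the parser above (illustrative;
 * which keys are honored depends on how jemalloc was configured, e.g. the
 * fill- and prof-related keys require config_fill/config_prof):
 *
 *     MALLOC_CONF="narenas:2,lg_chunk:21,stats_print:true"
 *     MALLOC_CONF="junk:alloc,redzone:true,quarantine:16384"
 *
 * The same syntax is read, in order, from je_malloc_conf, from the name of
 * the /etc/malloc.conf symbolic link, and from the MALLOC_CONF environment
 * variable, with later sources overriding earlier ones.
 */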

/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{

    if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
        malloc_init_recursible)) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (!malloc_initialized());
        return (false);
    }
#endif
    return (true);
}
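
/*
 * Concurrency note (informal): under JEMALLOC_THREADED_INIT, a thread that
 * loses the race to become the initializer spins above, repeatedly dropping
 * and reacquiring init_lock until malloc_initialized() becomes true, and then
 * reports that no further initialization work is needed.
 */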

/* init_lock must be held. */
static bool
malloc_init_hard_a0_locked(void)
{

    malloc_initializer = INITIALIZER;

    if (config_prof)
        prof_boot0();
    malloc_conf_init();
    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }
    if (base_boot())
        return (true);
    if (chunk_boot())
        return (true);
    if (ctl_boot())
        return (true);
    if (config_prof)
        prof_boot1();
    arena_boot();
    if (config_tcache && tcache_boot())
        return (true);
    if (malloc_mutex_init(&arenas_lock))
        return (true);
    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_total = narenas_auto = 1;
    arenas = &a0;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
    /*
     * Initialize one arena here.  The rest are lazily created in
     * arena_choose_hard().
     */
    if (arena_init(0) == NULL)
        return (true);
    malloc_init_state = malloc_init_a0_initialized;
    return (false);
}
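
/*
 * Bootstrap-order sketch (summary of the code above): a0 initialization runs
 * before TSD setup and before ncpus detection.  It points the global arenas
 * array at the static a0 slot and creates arena 0 via arena_init(0), so that
 * internal allocation (e.g. a0malloc()) works while the rest of
 * malloc_init_hard() is still in progress.
 */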

static bool
malloc_init_hard_a0(void)
{
    bool ret;

    malloc_mutex_lock(&init_lock);
    ret = malloc_init_hard_a0_locked();
    malloc_mutex_unlock(&init_lock);
    return (ret);
}

/*
 * Initialize data structures which may trigger recursive allocation.
 *
 * init_lock must be held.
 */
static void
malloc_init_hard_recursible(void)
{

    malloc_init_state = malloc_init_recursible;
    malloc_mutex_unlock(&init_lock);

    ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
    /* LinuxThreads's pthread_atfork() allocates. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif
    malloc_mutex_lock(&init_lock);
}

/* init_lock must be held. */
static bool
malloc_init_hard_finish(void)
{

    if (mutex_boot())
        return (true);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Make sure that the arenas array can be allocated.  In practice, this
     * limit is enough to allow the allocator to function, but the ctl
     * machinery will fail to allocate memory at far lower limits.
     */
    if (narenas_auto > chunksize / sizeof(arena_t *)) {
        narenas_auto = chunksize / sizeof(arena_t *);
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas_auto);
    }
    narenas_total = narenas_auto;

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
    if (arenas == NULL)
        return (true);
    /*
     * Zero the array.  In practice, this should always be pre-zeroed,
     * since it was just mmap()ed, but let's be sure.
     */
    memset(arenas, 0, sizeof(arena_t *) * narenas_total);
    /* Copy the pointer to the one arena that was already initialized. */
    arenas[0] = a0;

    malloc_init_state = malloc_init_initialized;
    return (false);
}
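
/*
 * Worked example for the narenas default above (illustrative numbers): on an
 * 8-CPU machine with opt_narenas unset, opt_narenas becomes 8 << 2 == 32
 * arenas; on a single-CPU machine it stays at 1.  The chunksize /
 * sizeof(arena_t *) clamp only matters for absurdly large explicit settings.
 */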

static bool
malloc_init_hard(void)
{

    malloc_mutex_lock(&init_lock);
    if (!malloc_init_hard_needed()) {
        malloc_mutex_unlock(&init_lock);
        return (false);
    }

    if (malloc_init_state != malloc_init_a0_initialized &&
        malloc_init_hard_a0_locked()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    if (malloc_tsd_boot0()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }
    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    malloc_init_hard_recursible();

    if (malloc_init_hard_finish()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    malloc_mutex_unlock(&init_lock);
    malloc_tsd_boot1();
    return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        p = imalloc(tsd, LARGE_MINCLASS);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = imalloc(tsd, usize);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(tsd_t *tsd, size_t usize)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imalloc_prof_sample(tsd, usize, tctx);
    else
        p = imalloc(tsd, usize);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}
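
/*
 * Sampling note (informal summary): prof_alloc_prep() uses the sentinel
 * (prof_tctx_t *)(uintptr_t)1U for unsampled allocations, so the
 * (uintptr_t)tctx != 1 test routes only sampled requests through
 * imalloc_prof_sample(), which pads small requests up to LARGE_MINCLASS so
 * that arena_prof_promoted() can record the true usize.
 */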

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
{

    if (unlikely(malloc_init()))
        return (NULL);
    *tsd = tsd_fetch();

    if (config_prof && opt_prof) {
        *usize = s2u(size);
        return (imalloc_prof(*tsd, *usize));
    }

    if (config_stats || (config_valgrind && unlikely(in_valgrind)))
        *usize = s2u(size);
    return (imalloc(*tsd, size));
}

void *
je_malloc(size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (size == 0)
        size = 1;

    ret = imalloc_body(size, &tsd, &usize);
    if (unlikely(ret == NULL)) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
        p = imalloc(tsd, LARGE_MINCLASS);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = ipalloc(tsd, usize, alignment, false);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imemalign_prof_sample(tsd, alignment, usize, tctx);
    else
        p = ipalloc(tsd, usize, alignment, false);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
    int ret;
    tsd_t *tsd;
    size_t usize;
    void *result;

    assert(min_alignment != 0);

    if (unlikely(malloc_init())) {
        result = NULL;
        goto label_oom;
    } else {
        tsd = tsd_fetch();
        if (size == 0)
            size = 1;

        /* Make sure that alignment is a large enough power of 2. */
        if (unlikely(((alignment - 1) & alignment) != 0
            || (alignment < min_alignment))) {
            if (config_xmalloc && unlikely(opt_xmalloc)) {
                malloc_write("<jemalloc>: Error allocating "
                    "aligned memory: invalid alignment\n");
                abort();
            }
            result = NULL;
            ret = EINVAL;
            goto label_return;
        }

        usize = sa2u(size, alignment);
        if (unlikely(usize == 0)) {
            result = NULL;
            goto label_oom;
        }

        if (config_prof && opt_prof)
            result = imemalign_prof(tsd, alignment, usize);
        else
            result = ipalloc(tsd, usize, alignment, false);
        if (unlikely(result == NULL))
            goto label_oom;
    }

    *memptr = result;
    ret = 0;
label_return:
    if (config_stats && likely(result != NULL)) {
        assert(usize == isalloc(result, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, result);
    return (ret);
label_oom:
    assert(result == NULL);
    if (config_xmalloc && unlikely(opt_xmalloc)) {
        malloc_write("<jemalloc>: Error allocating aligned memory: "
            "out of memory\n");
        abort();
    }
    ret = ENOMEM;
    goto label_return;
}
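
/*
 * Alignment check sketch: ((alignment - 1) & alignment) != 0 rejects any
 * alignment that is not a power of two (e.g. 24), and min_alignment lets the
 * two public entry points differ: posix_memalign() passes sizeof(void *) as
 * the floor, while aligned_alloc() passes 1.
 */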

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}

static void *
icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = icalloc(tsd, LARGE_MINCLASS);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = icalloc(tsd, usize);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(tsd_t *tsd, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = icalloc_prof_sample(tsd, usize, tctx);
	else
		p = icalloc(tsd, usize);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(p, usize, tctx);

	return (p);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsd_t *tsd;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(malloc_init())) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}
	tsd = tsd_fetch();

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num))) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		ret = icalloc_prof(tsd, usize);
	} else {
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = s2u(num_size);
		ret = icalloc(tsd, num_size);
	}

label_return:
	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(ret, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
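
/*
 * Worked example for the overflow check above (illustrative, assuming a
 * 64-bit size_t): with num == (1ULL << 32) + 1 and size == (1ULL << 32),
 * num * size wraps to 1ULL << 32.  Both operands have bits set in the upper
 * half of size_t, so (num | size) & (SIZE_T_MAX << 32) is nonzero, and
 * num_size / size == 1 != num exposes the wrap, making calloc() fail instead
 * of returning an undersized buffer.  When neither operand uses the upper
 * half, the mask term is zero and the division is skipped entirely.
 */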

static void *
irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, oldptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else
		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
{
	void *p;
	prof_tctx_t *old_tctx, *tctx;

	old_tctx = prof_tctx_get(oldptr);
	tctx = prof_alloc_prep(tsd, usize, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, oldptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
	if (p == NULL)
		return (NULL);
	prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(ptr);
	iqalloc(tsd, ptr, tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(ptr);
	isqalloc(tsd, ptr, usize, tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false));
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		old_usize = isalloc(ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind))
			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		ret = imalloc_body(size, &tsd, &usize);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(ret, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
	    old_rzsize, true, false);
	return (ret);
}
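
/*
 * Illustrative sketch (hypothetical caller, not part of jemalloc) of the two
 * special cases documented above:
 *
 *	void *p = realloc(NULL, 32);	// behaves like malloc(32)
 *	p = realloc(p, 0);		// behaves like free(p); returns NULL
 */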

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		ifree(tsd, ptr, tcache_get(tsd, false));
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif
#endif
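
/*
 * Illustrative sketch (hypothetical caller, not part of jemalloc): once the
 * hooks above are installed, a glibc-based program's plain allocation calls
 * are routed to jemalloc without recompilation, e.g.:
 *
 *	void *p = malloc(64);	// dispatched via __malloc_hook -> je_malloc
 *	free(p);		// dispatched via __free_hook -> je_free
 */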

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd, arena_ind, true, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if (likely(flags == 0)) {
		*usize = s2u(size);
		assert(*usize != 0);
		*alignment = 0;
		*zero = false;
		*tcache = tcache_get(tsd, true);
		*arena = NULL;
		return (false);
	} else {
		return (imallocx_flags_decode_hard(tsd, size, flags, usize,
		    alignment, zero, tcache, arena));
	}
}
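
/*
 * Illustrative sketch (hypothetical caller, not part of jemalloc): the flag
 * word decoded above packs alignment, zeroing, tcache, and arena selection
 * into a single int, e.g.:
 *
 *	int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE;
 *
 * Decoding such a value yields *alignment == 64, *zero == true,
 * *tcache == NULL, and *arena == NULL (no explicit arena requested).
 */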

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

	if (alignment != 0)
		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
	if (zero)
		return (icalloct(tsd, usize, tcache, arena));
	return (imalloct(tsd, usize, tcache, arena));
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{

	if (likely(flags == 0))
		return (imalloc(tsd, size));
	return (imallocx_flags(tsd, usize, alignment, zero, tcache, arena));
}

static void *
imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imalloct(tsd, LARGE_MINCLASS, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
		    zero, tcache, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
		    zero, tcache, arena);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
		    zero, tcache, arena);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(p, *usize, tctx);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (likely(flags == 0)) {
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			*usize = s2u(size);
		return (imalloc(tsd, size));
	}

	if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
	    &alignment, &zero, &tcache, &arena)))
		return (NULL);
	return (imallocx_flags(tsd, *usize, alignment, zero, tcache, arena));
}

void *
je_mallocx(size_t size, int flags)
{
	tsd_t *tsd;
	void *p;
	size_t usize;

	assert(size != 0);

	if (unlikely(malloc_init()))
		goto label_oom;
	tsd = tsd_fetch();

	if (config_prof && opt_prof)
		p = imallocx_prof(tsd, size, flags, &usize);
	else
		p = imallocx_no_prof(tsd, size, flags, &usize);
	if (unlikely(p == NULL))
		goto label_oom;

	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
		abort();
	}
	UTRACE(0, size, 0);
	return (NULL);
}
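
/*
 * Illustrative usage sketch (hypothetical caller, not part of jemalloc):
 *
 *	// 1 MiB, zeroed, 4 KiB-aligned, bypassing the thread cache.
 *	void *p = mallocx(1 << 20,
 *	    MALLOCX_ALIGN(4096) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
 *	if (p != NULL)
 *		dallocx(p, MALLOCX_TCACHE_NONE);
 *
 * Unlike malloc(), mallocx() requires size != 0 and reports failure solely by
 * returning NULL.
 */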

static void *
irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
    size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	prof_tctx_t *old_tctx, *tctx;

	old_tctx = prof_tctx_get(oldptr);
	tctx = prof_alloc_prep(tsd, *usize, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
		    alignment, *usize, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == oldptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);

	return (p);
}

void *
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd, arena_ind, true, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		assert(usize != 0);
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(p, config_prof);
	}

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	return (NULL);
}
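
/*
 * Illustrative usage sketch (hypothetical caller, not part of jemalloc):
 *
 *	void *p = mallocx(100, 0);
 *	void *q = rallocx(p, 200, MALLOCX_ZERO);	// new bytes zeroed
 *	if (q != NULL)
 *		p = q;		// on failure the original p remains valid
 *
 * Note that, unlike realloc(), rallocx() never frees the original allocation
 * on failure and never accepts size == 0.
 */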

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	/* Use minimum usize to determine whether promotion may happen. */
	if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
	    SMALL_MAXCLASS) {
		if (ixalloc(ptr, old_usize, SMALL_MAXCLASS+1,
		    (SMALL_MAXCLASS+1 >= size+extra) ? 0 : size+extra -
		    (SMALL_MAXCLASS+1), alignment, zero))
			return (old_usize);
		usize = isalloc(ptr, config_prof);
		if (max_usize < LARGE_MINCLASS)
			arena_prof_promoted(ptr, usize);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero);
	}

	return (usize);
}
|
|
|
|
|
|
|
|
JEMALLOC_ALWAYS_INLINE_C size_t
|
2014-09-23 12:09:23 +08:00
|
|
|
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
|
2014-10-31 11:23:16 +08:00
|
|
|
size_t extra, size_t alignment, bool zero)
|
2014-01-13 07:05:44 +08:00
|
|
|
{
|
2014-09-10 10:37:26 +08:00
|
|
|
size_t max_usize, usize;
|
|
|
|
prof_tctx_t *old_tctx, *tctx;
|
2014-01-13 07:05:44 +08:00
|
|
|
|
2014-08-19 07:22:13 +08:00
|
|
|
old_tctx = prof_tctx_get(ptr);
|
2014-09-10 10:37:26 +08:00
|
|
|
/*
|
|
|
|
* usize isn't knowable before ixalloc() returns when extra is non-zero.
|
|
|
|
* Therefore, compute its maximum possible value and use that in
|
|
|
|
* prof_alloc_prep() to decide whether to capture a backtrace.
|
|
|
|
* prof_realloc() will use the actual usize to decide whether to sample.
|
|
|
|
*/
|
|
|
|
max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
|
|
|
|
alignment);
|
2014-09-23 12:09:23 +08:00
|
|
|
tctx = prof_alloc_prep(tsd, max_usize, false);
|
2014-09-12 07:20:44 +08:00
|
|
|
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, max_usize, zero, tctx);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero);
	}
	if (unlikely(usize == old_usize)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);

	return (usize);
}
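/*
 * Worked example (illustrative, assuming the default size classes): for
 * xallocx(ptr, 100, 50, 0) the final usable size cannot exceed
 * s2u(100 + 50) == 160, so 160 is the max_usize handed to prof_alloc_prep()
 * above, while prof_realloc() later sees whatever size ixalloc() actually
 * produced.
 */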
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	return (usize);
}
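/*
 * Usage sketch (illustrative, assuming the unprefixed public API from
 * <jemalloc/jemalloc.h>): xallocx() resizes in place only, so callers compare
 * the returned usable size against the requested minimum and fall back to
 * rallocx() when the in-place attempt fails.
 *
 *	void *buf = mallocx(4096, 0);
 *	if (xallocx(buf, 8192, 0, 0) < 8192) {
 *		// Could not grow in place; move the allocation instead.
 *		buf = rallocx(buf, 8192, 0);
 *	}
 */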
size_t
je_sallocx(const void *ptr, int flags)
{
	size_t usize;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		usize = ivsalloc(ptr, config_prof);
	else
		usize = isalloc(ptr, config_prof);

	return (usize);
}
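/*
 * Usage sketch (illustrative): sallocx() reports the usable size actually
 * backing an allocation, which is often larger than the requested size.
 *
 *	void *p = mallocx(100, 0);
 *	size_t real = sallocx(p, 0);	// e.g. 112 on a typical configuration
 *	assert(real >= 100);
 */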
void
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	ifree(tsd, ptr, tcache);
}
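/*
 * Usage sketch (illustrative): the MALLOCX_TCACHE flags accepted here mirror
 * the allocation side, so a cache-bypassing allocation can be freed the same
 * way.
 *
 *	void *p = mallocx(64, MALLOCX_TCACHE_NONE);
 *	dallocx(p, MALLOCX_TCACHE_NONE);
 */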
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(size_t size, int flags)
{
	size_t usize;

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	assert(usize != 0);
	return (usize);
}
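/*
 * Illustrative note: the low MALLOCX_LG_ALIGN_MASK bits of flags carry the
 * base-2 log of the requested alignment, so e.g. MALLOCX_ALIGN(4096) encodes
 * the value 12 there and routes the size through sa2u() above, while flags
 * with no alignment bits set take the cheaper s2u() path.
 *
 *	void *page = mallocx(100, MALLOCX_ALIGN(4096));	// 4 KiB aligned
 */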
void
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	usize = inallocx(size, flags);
	assert(usize == isalloc(ptr, config_prof));

	tsd = tsd_fetch();
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	isfree(tsd, ptr, usize, tcache);
}
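/*
 * Usage sketch (illustrative): sdallocx() is the sized-deallocation
 * counterpart of dallocx(); the size/flags pair must describe the original
 * allocation so that the usize assertion above holds.
 *
 *	size_t n = 100;
 *	void *p = mallocx(n, MALLOCX_ALIGN(64));
 *	sdallocx(p, n, MALLOCX_ALIGN(64));
 */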
size_t
je_nallocx(size_t size, int flags)
{

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	return (inallocx(size, flags));
}
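/*
 * Usage sketch (illustrative): nallocx() returns the usable size that a
 * mallocx() call with the same arguments would produce, without allocating,
 * which lets callers size containers to the real allocation granularity.
 *
 *	size_t usable = nallocx(100, 0);
 *	void *p = mallocx(usable, 0);
 *	assert(sallocx(p, 0) == usable);
 */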
int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (unlikely(malloc_init()))
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (unlikely(malloc_init()))
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (unlikely(malloc_init()))
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
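/*
 * Usage sketch (illustrative, assuming the unprefixed public API): mallctl()
 * reads and/or writes a named control, while the nametomib/bymib pair caches
 * the name lookup for controls that are queried repeatedly.
 *
 *	unsigned narenas;
 *	size_t len = sizeof(narenas);
 *	if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0)
 *		printf("%u arenas\n", narenas);
 *
 *	size_t mib[2], miblen = 2;
 *	mallctlnametomib("arenas.narenas", mib, &miblen);
 *	mallctlbymib(mib, miblen, &narenas, &len, NULL, 0);
 */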
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);

	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}
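/*
 * Illustrative note (the registration itself lives in the initialization
 * path, not here): the pthread_atfork() hookup referred to in the comment
 * above takes roughly this shape, pairing the prefork/postfork handlers
 * defined below.
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */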

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized());

	/* Release all mutexes, now that fork() has completed. */
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

/******************************************************************************/