#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Runtime configuration options. */
const char *je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
#  ifdef JEMALLOC_FILL
bool	opt_junk = true;
#  else
bool	opt_junk = false;
#  endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t	arenas_lock;
arena_t		**arenas;
unsigned	narenas;

/* Set to true once the allocator has been initialized. */
static bool	malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool	malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do { \
	if (opt_utrace) { \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
	} \
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
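
/*
 * Example (illustrative): with opt_utrace enabled, je_malloc(42) returning p
 * is reported via utrace(2) as the record {NULL, 42, p}, and je_free(p) as
 * {p, 0, 0}; when JEMALLOC_UTRACE is not defined, UTRACE() expands to nothing
 * and call sites cost nothing.
 */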

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
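
/*
 * Note: stats_print_atexit() is only registered with atexit(3) when
 * opt_stats_print is set at boot, e.g. via MALLOC_CONF=stats_print:true (see
 * malloc_init_hard() below).
 */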

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		result = 1;
	}
#endif
	ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}
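
/*
 * Convention note: malloc_init() and the various *_boot() functions return
 * false on success and true on error, which is why callers are written as
 * "if (malloc_init()) { ... handle failure ... }".
 */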

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
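
/*
 * Worked example (illustrative): for the conf string "abort:true,narenas:8",
 * the first call to malloc_conf_next() yields k pointing at "abort" with
 * klen == 5 and v pointing at "true" with vlen == 4, leaving *opts_p just
 * past the comma; the second call yields klen == 7 ("narenas") and vlen == 1
 * ("8"), stopping at the terminating '\0'.  k and v point into the original
 * string and are not NUL-terminated, hence the explicit lengths; callers
 * check *opts != '\0' before calling again.
 */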

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL_HIT(o, n, hit) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
			    klen) == 0) { \
				if (strncmp("true", v, vlen) == 0 && \
				    vlen == sizeof("true")-1) \
					o = true; \
				else if (strncmp("false", v, vlen) == \
				    0 && vlen == sizeof("false")-1) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				hit = true; \
			} else \
				hit = false;
#define	CONF_HANDLE_BOOL(o, n) { \
			bool hit; \
			CONF_HANDLE_BOOL_HIT(o, n, hit); \
			if (hit) \
				continue; \
}
#define	CONF_HANDLE_SIZE_T(o, n, min, max) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
			    klen) == 0) { \
				uintmax_t um; \
				char *end; \
\
				set_errno(0); \
				um = malloc_strtoumax(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (um < min || um > max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = um; \
				continue; \
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
			    klen) == 0) { \
				long l; \
				char *end; \
\
				set_errno(0); \
				l = strtol(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)min || l > \
				    (ssize_t)max) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define	CONF_HANDLE_CHAR_P(o, n, d) \
			if (sizeof(n)-1 == klen && strncmp(n, k, \
			    klen) == 0) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
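
/*
 * The three configuration sources above are parsed in a fixed order: options
 * compiled in via je_malloc_conf, then the name of the /etc/malloc.conf
 * symbolic link, then the MALLOC_CONF environment variable.  Each pass simply
 * overwrites the affected opt_* variables, so later sources win; e.g.
 * (illustrative) MALLOC_CONF=lg_chunk:24 overrides a compiled-in
 * "lg_chunk:22".
 */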

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}
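
/*
 * Note on the profiling path above: when an allocation is sampled
 * ((uintptr_t)cnt != (uintptr_t)1U) and the request is small, it is promoted
 * to SMALL_MAXCLASS+1 bytes and arena_prof_promoted() records the real usable
 * size, presumably so that per-allocation profiling state can be tracked for
 * the sampled object.  The same pattern recurs in je_calloc(), je_realloc(),
 * and imemalign().
 */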

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}
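	/*
	 * Illustration: with a 64-bit size_t the mask above is
	 * SIZE_T_MAX << 32, so the division check only runs when num or size
	 * is at least 2^32; any smaller pair of operands cannot overflow 64
	 * bits when multiplied.
	 */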

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(p). */
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
    je_realloc;
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
|
2009-06-23 03:08:42 +08:00
|
|
|
/*
|
|
|
|
* Begin non-standard functions.
|
|
|
|
*/
|
|
|
|
|
|
|
|
size_t
|
2012-03-02 09:19:20 +08:00
|
|
|
je_malloc_usable_size(const void *ptr)
|
2009-06-23 03:08:42 +08:00
|
|
|
{
|
2009-12-29 16:09:15 +08:00
|
|
|
size_t ret;
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2012-02-03 14:04:57 +08:00
|
|
|
assert(malloc_initialized || IS_INITIALIZER);
|
Add {,r,s,d}allocm().
Add allocm(), rallocm(), sallocm(), and dallocm(), which are a
functional superset of malloc(), calloc(), posix_memalign(),
malloc_usable_size(), and free().
2010-09-18 06:46:18 +08:00
|
|
|
|
2012-02-11 12:22:09 +08:00
|
|
|
if (config_ivsalloc)
|
2012-04-06 15:35:09 +08:00
|
|
|
ret = ivsalloc(ptr, config_prof);
|
2012-03-27 04:13:55 +08:00
|
|
|
else
|
2012-04-06 15:35:09 +08:00
|
|
|
ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
|
2009-06-23 03:08:42 +08:00
|
|
|
|
2009-12-29 16:09:15 +08:00
|
|
|
return (ret);
|
2009-06-23 03:08:42 +08:00
|
|
|
}
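
/*
 * Usage sketch (non-normative), assuming the unprefixed public name:
 *
 *	void *p = malloc(100);
 *	size_t n = malloc_usable_size(p);	(n >= 100: the full size of
 *						the size class backing p)
 *
 * The entire returned size may be written to, but code that depends on n
 * rather than on the requested size ties itself to this allocator's size
 * classes.
 */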

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
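
/*
 * Typical invocation (non-normative): a NULL write_cb makes stats_print()
 * fall back to malloc_write(), i.e. stderr by default, and characters in
 * opts ('g', 'm', 'a', 'b', 'l') each suppress one section of the report:
 *
 *	malloc_stats_print(NULL, NULL, NULL);	(full report to stderr)
 *	malloc_stats_print(NULL, NULL, "bl");	(omit per-size-class detail)
 */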

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
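
/*
 * Usage sketch (non-normative): reads pass oldp/oldlenp, writes pass
 * newp/newlen, and both may be combined so the old value is read before
 * the new one is written:
 *
 *	bool abort_on_err;
 *	size_t len = sizeof(abort_on_err);
 *	mallctl("opt.abort", &abort_on_err, &len, NULL, 0);
 *
 * The EAGAIN above means allocator initialization itself failed, not that
 * the named control is temporarily busy.
 */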

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
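
/*
 * Non-normative sketch of the intended nametomib()/bymib() pattern
 * (mirroring the manual's example): translate a name once, then reuse the
 * MIB with one component patched per iteration to avoid repeated string
 * parsing:
 *
 *	unsigned nbins, i;
 *	size_t mib[4], miblen, len;
 *
 *	len = sizeof(nbins);
 *	mallctl("arenas.nbins", &nbins, &len, NULL, 0);
 *	miblen = 4;
 *	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size;
 *		mib[2] = i;
 *		len = sizeof(bin_size);
 *		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
 *	}
 */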

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
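
/*
 * Non-normative usage sketch: flags pack lg(alignment) into the low bits
 * (via ALLOCM_ALIGN()/ALLOCM_LG_ALIGN()) and OR in behavior bits:
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO)
 *	    != ALLOCM_SUCCESS)
 *		abort();
 *	(p is 64-byte-aligned and zeroed; rsize holds the real usable size)
 *	dallocm(p, 0);
 *
 * rsize may be NULL when the caller does not need the usable size.
 */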

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
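
/*
 * Non-normative sketch of in-place resizing via ALLOCM_NO_MOVE: request
 * size bytes, tolerate up to extra more, and fail rather than relocate:
 *
 *	size_t rsize;
 *	int r = rallocm(&p, &rsize, 8192, 4096, ALLOCM_NO_MOVE);
 *	if (r == ALLOCM_ERR_NOT_MOVED) {
 *		(p is untouched; fall back to allocate/copy/free)
 *	}
 *
 * Without ALLOCM_NO_MOVE, the label_err path above falls through to
 * label_oom instead, reporting ALLOCM_ERR_OOM and leaving *ptr unchanged.
 */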

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
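
/*
 * Non-normative lifecycle sketch tying the experimental API together:
 * nallocm() computes a size without allocating, sallocm() queries a live
 * pointer, dallocm() frees:
 *
 *	size_t usize, rsize;
 *	void *p;
 *
 *	nallocm(&usize, 100, 0);	(usize = s2u(100); nothing allocated)
 *	allocm(&p, &rsize, 100, 0);
 *	assert(rsize == usize);		(same size-class rounding)
 *	sallocm(p, &rsize, 0);		(re-query: still usize)
 *	dallocm(p, 0);
 */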

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}
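
/*
 * Non-normative note on wiring: on platforms without JEMALLOC_MUTEX_INIT_CB,
 * initialization registers the three handlers roughly as
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * so that fork() in a multithreaded process never snapshots a held
 * allocator mutex into the child.  The release order used in the two
 * postfork paths is the exact reverse of the acquire order in prefork.
 */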

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}
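
/*
 * Non-normative pairing sketch: these bypass the usual i[mcd]alloc()
 * wrappers precisely because they must not touch TLS:
 *
 *	void *tls_block = a0malloc(tls_size);
 *	...
 *	a0free(tls_block);
 *
 * Note that a0calloc() performs no num*size overflow check; the assumption
 * is that its only callers (libc TLS bootstrap) pass small, trusted
 * operands.
 */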

/******************************************************************************/