#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;
static unsigned		next_arena;

#ifndef NO_TLS
__thread arena_t	*arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
#else
pthread_key_t		arenas_tsd;
#endif

#ifdef JEMALLOC_STATS
# ifndef NO_TLS
__thread thread_allocated_t	thread_allocated_tls;
# else
pthread_key_t		thread_allocated_tsd;
# endif
#endif

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

/* Used to let the initializing thread recursively allocate. */
static pthread_t	malloc_initializer = (unsigned long)0;

/* Used to avoid initialization races. */
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;

#ifdef DYNAMIC_PAGE_SHIFT
size_t		pagesize;
size_t		pagesize_mask;
size_t		lg_pagesize;
#endif

unsigned	ncpus;

/* Runtime configuration options. */
const char	*JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
# ifdef JEMALLOC_FILL
bool	opt_junk = true;
# endif
#else
bool	opt_abort = false;
# ifdef JEMALLOC_FILL
bool	opt_junk = false;
# endif
#endif
#ifdef JEMALLOC_SYSV
bool	opt_sysv = false;
#endif
#ifdef JEMALLOC_XMALLOC
bool	opt_xmalloc = false;
#endif
#ifdef JEMALLOC_FILL
bool	opt_zero = false;
#endif
size_t	opt_narenas = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	wrtmessage(void *cbopaque, const char *s);
static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void	thread_allocated_cleanup(void *arg);
#endif
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/* malloc_message() setup. */

#ifdef JEMALLOC_HAVE_ATTR
JEMALLOC_ATTR(visibility("hidden"))
#else
static
#endif
void
wrtmessage(void *cbopaque, const char *s)
{
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    write(STDERR_FILENO, s, strlen(s));
#ifdef JEMALLOC_CC_SILENCE
	if (result < 0)
		result = errno;
#endif
}

void	(*JEMALLOC_P(malloc_message))(void *, const char *s)
    JEMALLOC_ATTR(visibility("default")) = wrtmessage;
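/*
 * malloc_message is an exported, assignable function pointer; an application
 * that wants allocator diagnostics routed elsewhere can assign its own
 * callback (same signature as wrtmessage()) in place of the default, e.g. a
 * hypothetical my_log_cb() that forwards the string to a logging subsystem.
 */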
/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	/* Allocate enough space for trailing bins. */
	ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
	    + (sizeof(arena_bin_t) * nbins));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/*
 * Choose an arena based on a per-thread value (slow-path code only, called
 * only by choose_arena()).
 */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		malloc_mutex_lock(&arenas_lock);
		if ((ret = arenas[next_arena]) == NULL)
			ret = arenas_extend(next_arena);
		next_arena = (next_arena + 1) % narenas;
		malloc_mutex_unlock(&arenas_lock);
	} else
		ret = arenas[0];

	ARENA_SET(ret);

	return (ret);
}
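/*
 * Arenas are handed out round-robin: with narenas == 4 and next_arena == 1,
 * for example, successive threads reaching this slow path are assigned arenas
 * 1, 2, 3, 0, 1, ..., with each arena lazily created on first use.
 */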
/*
 * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
 * provide a wrapper.
 */
int
buferror(int errnum, char *buf, size_t buflen)
{
#ifdef _GNU_SOURCE
	char *b = strerror_r(errnum, buf, buflen);
	if (b != buf) {
		strncpy(buf, b, buflen);
		buf[buflen-1] = '\0';
	}
	return (0);
#else
	return (strerror_r(errnum, buf, buflen));
#endif
}
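/*
 * Usage sketch (illustrative): capture errno immediately after the failing
 * call and format it for malloc_write(), e.g.
 *
 *	char ebuf[128];
 *	buferror(errno, ebuf, sizeof(ebuf));
 *	malloc_write(ebuf);
 */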
static void
stats_print_atexit(void)
{

#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
	unsigned i;

	/*
	 * Merge stats from extant threads.  This is racy, since individual
	 * threads do not lock when recording tcache stats events.  As a
	 * consequence, the final stats may be slightly out of date by the time
	 * they are reported, if other threads continue to allocate.
	 */
	for (i = 0; i < narenas; i++) {
		arena_t *arena = arenas[i];
		if (arena != NULL) {
			tcache_t *tcache;

			/*
			 * tcache_stats_merge() locks bins, so if any code is
			 * introduced that acquires both arena and bin locks in
			 * the opposite order, deadlocks may result.
			 */
			malloc_mutex_lock(&arena->lock);
			ql_foreach(tcache, &arena->tcache_ql, link) {
				tcache_stats_merge(tcache, arena);
			}
			malloc_mutex_unlock(&arena->lock);
		}
	}
#endif
	JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
}
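/*
 * This handler is only registered when the stats_print option is enabled,
 * e.g. via MALLOC_CONF="stats_print:true" (see malloc_conf_init() and
 * malloc_init_hard() below).
 */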
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
thread_allocated_t *
thread_allocated_get_hard(void)
{
	thread_allocated_t *thread_allocated = (thread_allocated_t *)
	    imalloc(sizeof(thread_allocated_t));
	if (thread_allocated == NULL) {
		static thread_allocated_t static_thread_allocated = {0, 0};
		malloc_write("<jemalloc>: Error allocating TSD;"
		    " mallctl(\"thread.{de,}allocated[p]\", ...)"
		    " will be inaccurate\n");
		if (opt_abort)
			abort();
		return (&static_thread_allocated);
	}
	pthread_setspecific(thread_allocated_tsd, thread_allocated);
	thread_allocated->allocated = 0;
	thread_allocated->deallocated = 0;
	return (thread_allocated);
}
#endif

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */
static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}
#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
static void
thread_allocated_cleanup(void *arg)
{
	uint64_t *allocated = (uint64_t *)arg;

	if (allocated != NULL)
		idalloc(allocated);
}
#endif
/*
 * FreeBSD's pthreads implementation calls malloc(3), so the malloc
 * implementation has to take pains to avoid infinite recursion during
 * initialization.
 */
static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E':
		case 'F': case 'G': case 'H': case 'I': case 'J':
		case 'K': case 'L': case 'M': case 'N': case 'O':
		case 'P': case 'Q': case 'R': case 'S': case 'T':
		case 'U': case 'V': case 'W': case 'X': case 'Y':
		case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e':
		case 'f': case 'g': case 'h': case 'i': case 'j':
		case 'k': case 'l': case 'm': case 'n': case 'o':
		case 'p': case 'q': case 'r': case 's': case 't':
		case 'u': case 'v': case 'w': case 'x': case 'y':
		case 'z':
		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string "
				    "ends with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf "
			    "string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the
			 * next time this function is called, it will
			 * assume that end of input has been cleanly
			 * reached if no input remains, but we have
			 * optimistically already consumed the comma if
			 * one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string "
				    "ends with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
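/*
 * For example, parsing the conf string "abort:true,narenas:8" yields the
 * key/value pairs ("abort", "true") and ("narenas", "8") on successive calls;
 * malloc_conf_next() returns false for each pair parsed, and true on a
 * malformed string or when input ends in the middle of a key.
 */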
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{
	char buf[PATH_MAX + 1];

	malloc_write("<jemalloc>: ");
	malloc_write(msg);
	malloc_write(": ");
	memcpy(buf, k, klen);
	memcpy(&buf[klen], ":", 1);
	memcpy(&buf[klen+1], v, vlen);
	buf[klen+1+vlen] = '\0';
	malloc_write(buf);
	malloc_write("\n");
}
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (JEMALLOC_P(malloc_conf) != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = JEMALLOC_P(malloc_conf);
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen;
			const char *linkname =
#ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#else
			    "/etc/malloc.conf"
#endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		}
		default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(n)						\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					opt_##n = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					opt_##n = false;		\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(n, min, max)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				unsigned long ul;			\
				char *end;				\
									\
				errno = 0;				\
				ul = strtoul(v, &end, 0);		\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (ul < min || ul > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = ul;			\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(n, min, max)				\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				errno = 0;				\
				l = strtol(v, &end, 0);			\
				if (errno != 0 || (uintptr_t)end -	\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					opt_##n = l;			\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(n, d)					\
			if (sizeof(#n)-1 == klen && strncmp(#n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(opt_##n)-1) ? vlen :		\
				    sizeof(opt_##n)-1;			\
				strncpy(opt_##n, v, cpylen);		\
				opt_##n[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(abort)
			CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
			    PAGE_SHIFT-1)
			/*
			 * Chunks always require at least one header page,
			 * plus one data page.
			 */
			CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(stats_print)
#ifdef JEMALLOC_FILL
			CONF_HANDLE_BOOL(junk)
			CONF_HANDLE_BOOL(zero)
#endif
#ifdef JEMALLOC_SYSV
			CONF_HANDLE_BOOL(sysv)
#endif
#ifdef JEMALLOC_XMALLOC
			CONF_HANDLE_BOOL(xmalloc)
#endif
#ifdef JEMALLOC_TCACHE
			CONF_HANDLE_BOOL(tcache)
			CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
			    (sizeof(size_t) << 3) - 1)
#endif
#ifdef JEMALLOC_PROF
			CONF_HANDLE_BOOL(prof)
			CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
			CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
			CONF_HANDLE_BOOL(prof_active)
			CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_accum)
			CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
			    (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
			    (sizeof(uint64_t) << 3) - 1)
			CONF_HANDLE_BOOL(prof_gdump)
			CONF_HANDLE_BOOL(prof_leak)
#endif
#ifdef JEMALLOC_SWAP
			CONF_HANDLE_BOOL(overcommit)
#endif
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}

		/* Validate configuration of options that are inter-related. */
		if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
			malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
			    "relationship; restoring defaults\n");
			opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
			opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
		}
	}
}
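/*
 * For example, MALLOC_CONF="lg_chunk:26,narenas:4" requests 64 MiB chunks and
 * four arenas; a key that matches no CONF_HANDLE_* entry above is reported as
 * "Invalid conf pair", and out-of-range numeric values are rejected with
 * "Out-of-range conf value" while the compiled-in default is kept.
 */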
static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || malloc_initializer == pthread_self()) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
	if (malloc_initializer != (unsigned long)0) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}

#ifdef DYNAMIC_PAGE_SHIFT
	/* Get page size. */
	{
		long result;

		result = sysconf(_SC_PAGESIZE);
		assert(result != -1);
		pagesize = (unsigned)result;

		/*
		 * We assume that pagesize is a power of 2 when calculating
		 * pagesize_mask and lg_pagesize.
		 */
		assert(((result - 1) & result) == 0);
		pagesize_mask = result - 1;
		lg_pagesize = ffs((int)result) - 1;
	}
#endif
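	/*
	 * With 4 KiB pages, for example, the block above yields
	 * pagesize == 4096, pagesize_mask == 0xfff, and lg_pagesize == 12.
	 */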
#ifdef JEMALLOC_PROF
	prof_boot0();
#endif

	malloc_conf_init();

	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
	    jemalloc_postfork) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_PROF
	prof_boot1();
#endif

	if (arena_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_TCACHE
	if (tcache_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
	/* Initialize allocation counters before any allocations can occur. */
	if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
	    != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Assign the initial arena to the initial thread, in order to avoid
	 * spurious creation of an extra arena if the application switches to
	 * threaded mode.
	 */
	ARENA_SET(arenas[0]);

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

#ifdef JEMALLOC_PROF
	if (prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	/* Get number of CPUs. */
	malloc_initializer = pthread_self();
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
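	/*
	 * For example, ncpus == 8 yields opt_narenas == 32 here, unless the
	 * narenas option was set explicitly via the configuration string.
	 */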
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		char buf[UMAX2S_BUFSIZE];

		narenas = chunksize / sizeof(arena_t *);
		malloc_write("<jemalloc>: Reducing narenas to limit (");
		malloc_write(u2s(narenas, 10, buf));
		malloc_write(")\n");
	}
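	/*
	 * As a rough illustration, with 4 MiB chunks and 8-byte pointers this
	 * cap works out to 524288 arenas, far beyond any practical setting.
	 */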
	next_arena = (narenas > 0) ? 1 : 0;

#ifdef NO_TLS
	if (pthread_key_create(&arenas_tsd, NULL) != 0) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
#endif

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

#ifdef JEMALLOC_ZONE
	/* Register the custom zone. */
	malloc_zone_register(create_zone());

	/*
	 * Convert the default szone to an "overlay zone" that is capable of
	 * deallocating szone-allocated objects, but allocating new objects
	 * from jemalloc.
	 */
	szone2ozone(malloc_default_zone());
#endif

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

#ifdef JEMALLOC_ZONE
JEMALLOC_ATTR(constructor)
void
jemalloc_darwin_init(void)
{

	if (malloc_init_hard())
		abort();
}
#endif
/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(malloc)(size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init()) {
		ret = NULL;
		goto OOM;
	}

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
# ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in malloc(): "
				    "invalid size 0\n");
				abort();
			}
# endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(size);
		if ((cnt = prof_alloc_prep(usize)) == NULL) {
			ret = NULL;
			goto OOM;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			ret = imalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(size);
#endif
		ret = imalloc(size);
	}

OOM:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}
JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
	int ret;
	void *result;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0) {
#ifdef JEMALLOC_SYSV
			if (opt_sysv == false)
#endif
				size = 1;
#ifdef JEMALLOC_SYSV
			else {
# ifdef JEMALLOC_XMALLOC
				if (opt_xmalloc) {
					malloc_write("<jemalloc>: Error in "
					    "posix_memalign(): invalid size "
					    "0\n");
					abort();
				}
# endif
				result = NULL;
				*memptr = NULL;
				ret = 0;
				goto RETURN;
			}
#endif
		}

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || alignment < sizeof(void *)) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in "
				    "posix_memalign(): invalid alignment\n");
				abort();
			}
#endif
			result = NULL;
			ret = EINVAL;
			goto RETURN;
		}

#ifdef JEMALLOC_PROF
		if (opt_prof) {
			usize = sa2u(size, alignment, NULL);
			if ((cnt = prof_alloc_prep(usize)) == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= small_maxclass) {
					result = ipalloc(small_maxclass+1,
					    alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(size, alignment,
					    false);
				}
			}
		} else
#endif
		{
#ifdef JEMALLOC_STATS
			usize = sa2u(size, alignment, NULL);
#endif
			result = ipalloc(size, alignment, false);
		}
	}

	if (result == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in posix_memalign(): "
			    "out of memory\n");
			abort();
		}
#endif
		ret = ENOMEM;
		goto RETURN;
	}

	*memptr = result;
	ret = 0;

RETURN:
#ifdef JEMALLOC_STATS
	if (result != NULL) {
		assert(usize == isalloc(result));
		ALLOCATED_ADD(usize, 0);
	}
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
#endif
	return (ret);
}
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(calloc)(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto RETURN;
	}

	num_size = num * size;
	if (num_size == 0) {
#ifdef JEMALLOC_SYSV
		if ((opt_sysv == false) && ((num == 0) || (size == 0)))
#endif
			num_size = 1;
#ifdef JEMALLOC_SYSV
		else {
			ret = NULL;
			goto RETURN;
		}
#endif
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto RETURN;
	}
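	/*
	 * On a 64-bit system, for example, the mask above covers the top 32
	 * bits of a size_t; if both num and size fit in 32 bits their product
	 * cannot overflow, so the division check only runs when at least one
	 * operand has a high bit set.
	 */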
#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = s2u(num_size);
		if ((cnt = prof_alloc_prep(usize)) == NULL) {
			ret = NULL;
			goto RETURN;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= small_maxclass) {
			ret = icalloc(small_maxclass+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		usize = s2u(num_size);
#endif
		ret = icalloc(num_size);
	}

RETURN:
	if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
		if (opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
#endif
		errno = ENOMEM;
	}

#ifdef JEMALLOC_PROF
	if (opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, 0);
	}
#endif
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(realloc)(void *ptr, size_t size)
{
	void *ret;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize
# ifdef JEMALLOC_CC_SILENCE
	    = 0
# endif
	    ;
	size_t old_size = 0;
#endif
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
	prof_ctx_t *old_ctx
# ifdef JEMALLOC_CC_SILENCE
	    = NULL
# endif
	    ;
#endif

	if (size == 0) {
#ifdef JEMALLOC_SYSV
		if (opt_sysv == false)
#endif
			size = 1;
#ifdef JEMALLOC_SYSV
		else {
			if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
				old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
				if (opt_prof) {
					old_ctx = prof_ctx_get(ptr);
					cnt = NULL;
				}
#endif
				idalloc(ptr);
			}
#ifdef JEMALLOC_PROF
			else if (opt_prof) {
				old_ctx = NULL;
				cnt = NULL;
			}
#endif
			ret = NULL;
			goto RETURN;
		}
#endif
	}

	if (ptr != NULL) {
		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		old_size = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			if ((cnt = prof_alloc_prep(usize)) == NULL) {
				ret = NULL;
				goto OOM;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= small_maxclass) {
				ret = iralloc(ptr, small_maxclass+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
			} else
				ret = iralloc(ptr, size, 0, 0, false, false);
		} else
#endif
		{
#ifdef JEMALLOC_STATS
			usize = s2u(size);
#endif
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

#ifdef JEMALLOC_PROF
OOM:
#endif
		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	} else {
#ifdef JEMALLOC_PROF
		if (opt_prof)
			old_ctx = NULL;
#endif
		if (malloc_init()) {
#ifdef JEMALLOC_PROF
			if (opt_prof)
				cnt = NULL;
#endif
			ret = NULL;
		} else {
#ifdef JEMALLOC_PROF
			if (opt_prof) {
				usize = s2u(size);
				if ((cnt = prof_alloc_prep(usize)) == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    small_maxclass) {
						ret = imalloc(small_maxclass+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else
#endif
			{
#ifdef JEMALLOC_STATS
				usize = s2u(size);
#endif
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
#ifdef JEMALLOC_XMALLOC
			if (opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
#endif
			errno = ENOMEM;
		}
	}

#ifdef JEMALLOC_SYSV
RETURN:
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
#endif
#ifdef JEMALLOC_STATS
	if (ret != NULL) {
		assert(usize == isalloc(ret));
		ALLOCATED_ADD(usize, old_size);
	}
#endif
	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(free)(void *ptr)
{

	if (ptr != NULL) {
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
		size_t usize;
#endif

		assert(malloc_initialized || malloc_initializer ==
		    pthread_self());

#ifdef JEMALLOC_STATS
		usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
		if (opt_prof) {
# ifndef JEMALLOC_STATS
			usize = isalloc(ptr);
# endif
			prof_free(ptr, usize);
		}
#endif
#ifdef JEMALLOC_STATS
		ALLOCATED_ADD(0, usize);
#endif
		idalloc(ptr);
	}
}
/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 *
 * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
 * entire point is to avoid accidental mixed allocator usage.
 */
#ifndef JEMALLOC_PREFIX

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(memalign)(size_t alignment, size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    JEMALLOC_P(posix_memalign)(&ret, alignment, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_ATTR(malloc)
JEMALLOC_ATTR(visibility("default"))
void *
JEMALLOC_P(valloc)(size_t size)
{
	void *ret;
#ifdef JEMALLOC_CC_SILENCE
	int result =
#endif
	    JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
#ifdef JEMALLOC_CC_SILENCE
	if (result != 0)
		return (NULL);
#endif
	return (ret);
}
#endif

#endif /* JEMALLOC_PREFIX */
/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ATTR(visibility("default"))
size_t
JEMALLOC_P(malloc_usable_size)(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	ret = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	ret = isalloc(ptr);
#endif

	return (ret);
}
JEMALLOC_ATTR(visibility("default"))
|
2009-12-29 16:09:15 +08:00
|
|
|
void
|
2010-03-04 09:45:38 +08:00
|
|
|
JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
|
|
|
|
void *cbopaque, const char *opts)
|
2009-12-29 16:09:15 +08:00
|
|
|
{
|
|
|
|
|
2010-03-04 09:45:38 +08:00
|
|
|
stats_print(write_cb, cbopaque, opts);
|
2010-01-28 05:10:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
JEMALLOC_ATTR(visibility("default"))
|
|
|
|
int
|
|
|
|
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
|
|
|
|
size_t newlen)
|
|
|
|
{
|
2009-12-29 16:09:15 +08:00
|
|
|
|
2010-01-28 05:45:21 +08:00
|
|
|
if (malloc_init())
|
|
|
|
return (EAGAIN);
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
|
2009-12-29 16:09:15 +08:00
|
|
|
}
|
|
|
|
|
2010-01-24 18:53:40 +08:00
|
|
|
JEMALLOC_ATTR(visibility("default"))
|
2010-01-28 05:10:55 +08:00
|
|
|
int
|
|
|
|
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
|
2010-01-24 18:53:40 +08:00
|
|
|
{
|
|
|
|
|
2010-01-28 05:45:21 +08:00
|
|
|
if (malloc_init())
|
|
|
|
return (EAGAIN);
|
|
|
|
|
2010-01-28 05:10:55 +08:00
|
|
|
return (ctl_nametomib(name, mibp, miblenp));
|
|
|
|
}

JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
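
/*
 * Usage sketch (illustrative, not part of the original source), following the
 * pattern the documentation suggests for repeated lookups: translate the name
 * once, then patch the wildcard index and reuse the MIB.  The control names
 * assumed here are "arenas.nbins" and "arenas.bin.<i>.size":
 *
 *	unsigned nbins, i;
 *	size_t mib[4], len, miblen;
 *
 *	len = sizeof(nbins);
 *	JEMALLOC_P(mallctl)("arenas.nbins", &nbins, &len, NULL, 0);
 *
 *	miblen = sizeof(mib) / sizeof(mib[0]);
 *	JEMALLOC_P(mallctlnametomib)("arenas.bin.0.size", mib, &miblen);
 *	for (i = 0; i < nbins; i++) {
 *		size_t bin_size;
 *
 *		mib[2] = i;
 *		len = sizeof(bin_size);
 *		JEMALLOC_P(mallctlbymib)(mib, miblen, &bin_size, &len, NULL, 0);
 *		... use bin_size ...
 *	}
 */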

JEMALLOC_INLINE void *
iallocm(size_t size, size_t alignment, bool zero)
{

	if (alignment != 0)
		return (ipalloc(size, alignment, zero));
	else if (zero)
		return (icalloc(size));
	else
		return (imalloc(size));
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
#endif

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto OOM;

#ifdef JEMALLOC_PROF
	if (opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
		    NULL);
		if ((cnt = prof_alloc_prep(usize)) == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    small_maxclass) {
			p = iallocm(small_maxclass+1, alignment, zero);
			if (p == NULL)
				goto OOM;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(size, alignment, zero);
			if (p == NULL)
				goto OOM;
		}

		if (rsize != NULL)
			*rsize = usize;
	} else
#endif
	{
		p = iallocm(size, alignment, zero);
		if (p == NULL)
			goto OOM;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
			usize = (alignment == 0) ? s2u(size) : sa2u(size,
			    alignment, NULL);
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = p;
#ifdef JEMALLOC_STATS
	assert(usize == isalloc(p));
	ALLOCATED_ADD(usize, 0);
#endif
	return (ALLOCM_SUCCESS);
OOM:
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	*ptr = NULL;
	return (ALLOCM_ERR_OOM);
}
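
/*
 * Usage sketch (illustrative, not part of the original source), assuming the
 * ALLOCM_ALIGN()/ALLOCM_ZERO flag macros and ALLOCM_* return codes from the
 * public header:
 *
 *	void *p;
 *	size_t rsize;
 *
 *	if (JEMALLOC_P(allocm)(&p, &rsize, 4096,
 *	    ALLOCM_ALIGN(64) | ALLOCM_ZERO) != ALLOCM_SUCCESS) {
 *		... out of memory ...
 *	}
 *
 * On success, p is 64-byte aligned and zeroed, and rsize holds the usable
 * size actually backing it (at least 4096).  Passing rsize == NULL skips that
 * report, and flags == 0 requests a plain allocation.
 */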

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
    int flags)
{
	void *p, *q;
	size_t usize;
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t old_size;
#endif
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
#ifdef JEMALLOC_PROF
	prof_thr_cnt_t *cnt;
	prof_ctx_t *old_ctx;
#endif

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || malloc_initializer == pthread_self());

	p = *ptr;
#ifdef JEMALLOC_PROF
	if (opt_prof) {
		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in prof_alloc_prep() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment, NULL);
		old_size = isalloc(p);
		old_ctx = prof_ctx_get(p);
		if ((cnt = prof_alloc_prep(max_usize)) == NULL)
			goto OOM;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize
		    <= small_maxclass) {
			q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
			    size+extra) ? 0 : size+extra - (small_maxclass+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
			arena_prof_promoted(q, usize);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto ERR;
			usize = isalloc(q);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
	} else
#endif
	{
#ifdef JEMALLOC_STATS
		old_size = isalloc(p);
#endif
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto ERR;
#ifndef JEMALLOC_STATS
		if (rsize != NULL)
#endif
		{
			usize = isalloc(q);
#ifdef JEMALLOC_STATS
			if (rsize != NULL)
#endif
				*rsize = usize;
		}
	}

	*ptr = q;
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(usize, old_size);
#endif
	return (ALLOCM_SUCCESS);
ERR:
	if (no_move)
		return (ALLOCM_ERR_NOT_MOVED);
#ifdef JEMALLOC_PROF
OOM:
#endif
#ifdef JEMALLOC_XMALLOC
	if (opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
#endif
	return (ALLOCM_ERR_OOM);
}
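
/*
 * Usage sketch (illustrative, not part of the original source).  The
 * ALLOCM_NO_MOVE flag asks for in-place resizing only, so the object is never
 * relocated and the caller can fall back explicitly:
 *
 *	if (JEMALLOC_P(rallocm)(&p, &rsize, 8192, 0, ALLOCM_NO_MOVE)
 *	    == ALLOCM_SUCCESS) {
 *		... resized in place; rsize holds the new usable size ...
 *	} else {
 *		... ALLOCM_ERR_NOT_MOVED: allocate/copy/free instead ...
 *	}
 *
 * Without ALLOCM_NO_MOVE the object may move, in which case *ptr is updated
 * to the new location.  The extra argument (0 here) expresses how much space
 * beyond size would be useful but is not required.
 */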

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_IVSALLOC
	sz = ivsalloc(ptr);
#else
	assert(ptr != NULL);
	sz = isalloc(ptr);
#endif
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

JEMALLOC_ATTR(nonnull(1))
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(dallocm)(void *ptr, int flags)
{
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
	size_t usize;
#endif

	assert(ptr != NULL);
	assert(malloc_initialized || malloc_initializer == pthread_self());

#ifdef JEMALLOC_STATS
	usize = isalloc(ptr);
#endif
#ifdef JEMALLOC_PROF
	if (opt_prof) {
# ifndef JEMALLOC_STATS
		usize = isalloc(ptr);
# endif
		prof_free(ptr, usize);
	}
#endif
#ifdef JEMALLOC_STATS
	ALLOCATED_ADD(0, usize);
#endif
	idalloc(ptr);

	return (ALLOCM_SUCCESS);
}
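
/*
 * Usage sketch (illustrative, not part of the original source).  sallocm() is
 * the *allocm() counterpart of malloc_usable_size(), and dallocm() the
 * counterpart of free(); neither consults its flags argument in this
 * implementation, so 0 is the natural value to pass:
 *
 *	size_t usize;
 *
 *	JEMALLOC_P(sallocm)(p, &usize, 0);
 *	... use usize ...
 *	JEMALLOC_P(dallocm)(p, 0);
 */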

/*
 * End non-standard functions.
 */
/******************************************************************************/

/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

void
jemalloc_prefork(void)
{
	unsigned i;

	/* Acquire all mutexes in a safe order. */

	malloc_mutex_lock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_lock(&arenas[i]->lock);
	}

	malloc_mutex_lock(&base_mtx);

	malloc_mutex_lock(&huge_mtx);

#ifdef JEMALLOC_DSS
	malloc_mutex_lock(&dss_mtx);
#endif

#ifdef JEMALLOC_SWAP
	malloc_mutex_lock(&swap_mtx);
#endif
}

void
jemalloc_postfork(void)
{
	unsigned i;

	/* Release all mutexes, now that fork() has completed. */

#ifdef JEMALLOC_SWAP
	malloc_mutex_unlock(&swap_mtx);
#endif

#ifdef JEMALLOC_DSS
	malloc_mutex_unlock(&dss_mtx);
#endif

	malloc_mutex_unlock(&huge_mtx);

	malloc_mutex_unlock(&base_mtx);

	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			malloc_mutex_unlock(&arenas[i]->lock);
	}
	malloc_mutex_unlock(&arenas_lock);
}
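
/*
 * Illustrative note (not part of the original source): a threading library,
 * or the allocator's own initialization path, would typically register these
 * handlers via pthread_atfork() so that no arena, base, huge, dss, or swap
 * mutex is held across fork() in the child:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
 *
 * The prepare handler takes every lock in a fixed order, and both the parent
 * and child handlers release them, which is why jemalloc_postfork() is passed
 * twice.
 */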

/******************************************************************************/