#include "jemalloc/internal/jemalloc_internal.h"
|
|
|
|
#ifndef JEMALLOC_ZONE
|
|
|
|
# error "This source file is for zones on Darwin (OS X)."
|
|
|
|
#endif
|
|
|
|
|
2012-03-27 20:20:13 +08:00
|
|
|
/*
 * The malloc_default_purgeable_zone function is only available on >= 10.6.
 * We need to check whether it is present at runtime, thus the weak_import.
 */
extern malloc_zone_t *malloc_default_purgeable_zone(void)
JEMALLOC_ATTR(weak_import);

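/*
 * Descriptive note (added for clarity): weak_import lets the library load even
 * when the symbol is absent at run time (it then resolves to NULL), which is
 * why register_zone() compares malloc_default_purgeable_zone against NULL
 * before calling it.
 */
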
/******************************************************************************/
/* Data. */

static malloc_zone_t zone;
static struct malloc_introspection_t zone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);

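/*
 * Descriptive note (added for clarity): JEMALLOC_ZONE_VERSION guards the
 * optional entry points above and the corresponding malloc_zone_t /
 * malloc_introspection_t fields set in register_zone(); memalign,
 * free_definite_size, pressure_relief, and the newer introspection hooks only
 * exist in newer zone ABI versions.
 */
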
/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{

    /*
     * There appear to be places within Darwin (such as setenv(3)) that
     * cause calls to this function with pointers that *no* zone owns. If
     * we knew that all pointers were owned by *some* zone, we could split
     * our zone into two parts, and use one as the default allocator and
     * the other as the default deallocator/reallocator. Since that will
     * not work in practice, we must check all pointers to assure that they
     * reside within a mapped chunk before determining size.
     */
    return (ivsalloc(ptr, config_prof));
}

static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{

    return (je_malloc(size));
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{

    return (je_calloc(num, size));
}

static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, PAGE, size);

    return (ret);
}

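/*
 * Descriptive note (added for clarity): ivsalloc() reports the allocation size
 * when jemalloc owns the pointer and 0 otherwise; the deallocation and
 * reallocation wrappers below use that result to decide whether to forward a
 * call to jemalloc or to the system allocator.
 */
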
static void
zone_free(malloc_zone_t *zone, void *ptr)
{

    if (ivsalloc(ptr, config_prof) != 0) {
        je_free(ptr);
        return;
    }

    free(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{

    if (ivsalloc(ptr, config_prof) != 0)
        return (je_realloc(ptr, size));

    return (realloc(ptr, size));
}

#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    je_posix_memalign(&ret, alignment, size);

    return (ret);
}
#endif

#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
    size_t alloc_size;

    alloc_size = ivsalloc(ptr, config_prof);
    if (alloc_size != 0) {
        assert(alloc_size == size);
        je_free(ptr);
        return;
    }

    free(ptr);
}
#endif

static void *
zone_destroy(malloc_zone_t *zone)
{

    /* This function should never be called. */
    not_reached();
    return (NULL);
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{

    if (size == 0)
        size = 1;
    return (s2u(size));
}

static void
zone_force_lock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_postfork_parent();
}

JEMALLOC_ATTR(constructor)
void
register_zone(void)
{
    /*
     * If something else replaced the system default zone allocator, don't
     * register jemalloc's.
     */
    malloc_zone_t *default_zone = malloc_default_zone();
    malloc_zone_t *purgeable_zone = NULL;
    if (!default_zone->zone_name ||
        strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
        return;
    }

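    /*
     * Descriptive note (added for clarity): fill in the malloc_zone_t entry
     * points with the jemalloc wrappers defined above; hooks that are not
     * supported are left NULL.
     */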
    zone.size = (void *)zone_size;
    zone.malloc = (void *)zone_malloc;
    zone.calloc = (void *)zone_calloc;
    zone.valloc = (void *)zone_valloc;
    zone.free = (void *)zone_free;
    zone.realloc = (void *)zone_realloc;
    zone.destroy = (void *)zone_destroy;
    zone.zone_name = "jemalloc_zone";
    zone.batch_malloc = NULL;
    zone.batch_free = NULL;
    zone.introspect = &zone_introspect;
    zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
    zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
    zone.pressure_relief = NULL;
#endif

    zone_introspect.enumerator = NULL;
    zone_introspect.good_size = (void *)zone_good_size;
    zone_introspect.check = NULL;
    zone_introspect.print = NULL;
    zone_introspect.log = NULL;
    zone_introspect.force_lock = (void *)zone_force_lock;
    zone_introspect.force_unlock = (void *)zone_force_unlock;
    zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
    zone_introspect.enable_discharge_checking = NULL;
    zone_introspect.disable_discharge_checking = NULL;
    zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
    zone_introspect.enumerate_discharged_pointers = NULL;
#else
    zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif

    /*
     * The default purgeable zone is created lazily by OSX's libc. It uses
     * the default zone when it is created for "small" allocations
     * (< 15 KiB), but assumes the default zone is a scalable_zone. This
     * obviously fails when the default zone is the jemalloc zone, so
     * malloc_default_purgeable_zone is called beforehand so that the
     * default purgeable zone is created when the default zone is still
     * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
     * to check for the existence of malloc_default_purgeable_zone() at
     * run time.
     */
    if (malloc_default_purgeable_zone != NULL)
        purgeable_zone = malloc_default_purgeable_zone();

    /* Register the custom zone. At this point it won't be the default. */
    malloc_zone_register(&zone);

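    /*
     * Descriptive note (added for clarity): loop until jemalloc's zone is
     * reported as the default; the unregister/re-register shuffle below is
     * repeated in case other zones still sit ahead of ours.
     */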
    do {
        default_zone = malloc_default_zone();
        /*
         * Unregister and reregister the default zone. On OSX >= 10.6,
         * unregistering takes the last registered zone and places it
         * at the location of the specified zone. Unregistering the
         * default zone thus makes the last registered one the default.
         * On OSX < 10.6, unregistering shifts all registered zones.
         * The first registered zone then becomes the default.
         */
        malloc_zone_unregister(default_zone);
        malloc_zone_register(default_zone);
        /*
         * On OSX 10.6, having the default purgeable zone appear before
         * the default zone makes some things crash because it thinks it
         * owns the default zone allocated pointers. We thus
         * unregister/re-register it in order to ensure it's always
         * after the default zone. On OSX < 10.6, there is no purgeable
         * zone, so this does nothing. On OSX >= 10.6, unregistering
         * replaces the purgeable zone with the last registered zone
         * above, i.e. the default zone. Registering it again then puts
         * it at the end, obviously after the default zone.
         */
        if (purgeable_zone) {
            malloc_zone_unregister(purgeable_zone);
            malloc_zone_register(purgeable_zone);
        }
    } while (malloc_default_zone() != &zone);
}