#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

static void *
chunk_dss_sbrk(intptr_t increment)
{

#ifdef JEMALLOC_DSS
	return (sbrk(increment));
#else
	not_implemented();
	return (NULL);
#endif
}
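
/*
 * Note: sbrk(0) reports the current program break without changing it, so
 * chunk_dss_sbrk(0) is used both to discover the initial DSS base in
 * chunk_dss_boot() and to re-read the current DSS end before each extension
 * attempt in chunk_alloc_dss().
 */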

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (!have_dss)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (!have_dss)
		return (dss_prec != dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}
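
/*
 * Illustrative usage sketch: applications normally do not call
 * chunk_dss_prec_get/set() directly; the precedence is typically driven
 * through the mallctl namespace using the strings in dss_prec_names[].
 * Assuming the standard "arena.<i>.dss" mallctl (names may vary across
 * jemalloc versions), an application could request sbrk-first allocation
 * for arena 0 with:
 *
 *	const char *dss = "primary";
 *	mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 */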

void *
chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit)
{
	void *ret;

	cassert(have_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
		do {
			/* Avoid an unnecessary system call. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/* Get the current end of the DSS. */
			dss_max = chunk_dss_sbrk(0);

			/* Make sure the earlier condition still holds. */
			if (new_addr != NULL && dss_max != new_addr)
				break;

			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = chunk_dss_sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0) {
					chunk_hooks_t chunk_hooks =
					    CHUNK_HOOKS_INITIALIZER;
					chunk_dalloc_wrapper(arena,
					    &chunk_hooks, cpad, cpad_size,
					    true);
				}
				if (*zero) {
					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
					    ret, size);
					memset(ret, 0, size);
				}
				*commit = true;
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}
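
/*
 * Worked example of the padding arithmetic above, with illustrative values
 * only (assume chunksize == 0x100000 and a requested alignment of 0x200000;
 * actual values depend on configuration):
 *
 *	dss_max   = 0x4b1000                        current end of the DSS
 *	gap_size  = (0x100000 - 0xb1000) & 0xfffff  = 0x4f000
 *	cpad      = 0x4b1000 + 0x4f000   = 0x500000 first chunk boundary
 *	ret       = ALIGNMENT_CEILING(0x4b1000, 0x200000) = 0x600000
 *	cpad_size = 0x600000 - 0x500000  = 0x100000 recycled via
 *	                                            chunk_dalloc_wrapper()
 *	incr      = 0x4f000 + 0x100000 + size       increment passed to sbrk()
 *	dss_next  = 0x600000 + size                 new DSS end on success
 */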

bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(have_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}
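
/*
 * Note: this [dss_base, dss_max) range check is how the rest of the
 * allocator distinguishes sbrk-backed chunks from mmap-backed chunks;
 * DSS memory cannot safely be returned to the operating system with
 * munmap(), so deallocation paths consult chunk_in_dss() before unmapping.
 */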

bool
chunk_dss_boot(void)
{

	cassert(have_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = chunk_dss_sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}

void
chunk_dss_prefork(void)
{

	if (have_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (have_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (have_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/