#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t	curchunks;
static size_t	highchunks;

rtree_t		chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;

static void	*chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, unsigned arena_ind);
static bool	chunk_dalloc_default(void *chunk, size_t size,
    unsigned arena_ind);
static bool	chunk_commit_default(void *chunk, size_t size,
    unsigned arena_ind);
static bool	chunk_decommit_default(void *chunk, size_t size,
    unsigned arena_ind);
static bool	chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool	chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool	chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t	chunk_hooks_default = {
	chunk_alloc_default,
	chunk_dalloc_default,
	chunk_commit_default,
	chunk_decommit_default,
	chunk_purge_default,
	chunk_split_default,
	chunk_merge_default
};
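/*
 * Illustrative sketch (not part of this file): an application can install
 * custom chunk hooks for an arena via the "arena.<i>.chunk_hooks" mallctl,
 * assuming the public chunk_hooks_t in <jemalloc/jemalloc.h> mirrors the
 * member order of chunk_hooks_default above.  The hook implementations
 * (my_alloc, my_dalloc, ...) are hypothetical names used only to show the
 * shape of the call:
 *
 *	chunk_hooks_t hooks = {
 *		my_alloc, my_dalloc, my_commit, my_decommit,
 *		my_purge, my_split, my_merge
 *	};
 *	chunk_hooks_t old;
 *	size_t sz = sizeof(chunk_hooks_t);
 *	mallctl("arena.0.chunk_hooks", (void *)&old, &sz, (void *)&hooks,
 *	    sizeof(hooks));
 */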

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool committed, bool zeroed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{

	return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(arena_t *arena)
{
	chunk_hooks_t chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks = chunk_hooks_get_locked(arena);
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
	chunk_hooks_t old_chunk_hooks;

	malloc_mutex_lock(&arena->chunks_mtx);
	old_chunk_hooks = arena->chunk_hooks;
	arena->chunk_hooks = *chunk_hooks;
	malloc_mutex_unlock(&arena->chunks_mtx);

	return (old_chunk_hooks);
}
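/*
 * Note: chunk_hooks_set() returns the previously installed hooks so that a
 * caller can restore them after temporarily overriding chunk management.
 * Illustrative sketch only (my_hooks is a hypothetical chunk_hooks_t):
 *
 *	chunk_hooks_t prev = chunk_hooks_set(arena, &my_hooks);
 *	... allocate/deallocate with the custom hooks in effect ...
 *	chunk_hooks_set(arena, &prev);
 */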

static void
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool locked)
{
	static const chunk_hooks_t uninitialized_hooks =
	    CHUNK_HOOKS_INITIALIZER;

	if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
	    0) {
		*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
		    chunk_hooks_get(arena);
	}
}

static void
chunk_hooks_assure_initialized_locked(arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
{

	chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
}

bool
chunk_register(const void *chunk, const extent_node_t *node)
{

	assert(extent_node_addr_get(node) == chunk);

	if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
		return (true);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nadd = (size == 0) ? 1 : size / chunksize;
		size_t cur = atomic_add_z(&curchunks, nadd);
		size_t high = atomic_read_z(&highchunks);
		while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highchunks update race.
			 */
			high = atomic_read_z(&highchunks);
		}
		if (cur > high && prof_gdump_get_unlocked())
			prof_gdump();
	}

	return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
	bool err;

	err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
	assert(!err);
	if (config_prof && opt_prof) {
		size_t size = extent_node_size_get(node);
		size_t nsub = (size == 0) ? 1 : size / chunksize;
		assert(atomic_read_z(&curchunks) >= nsub);
		atomic_sub_z(&curchunks, nsub);
	}
}

/*
 * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
 * fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
    extent_tree_t *chunks_ad, size_t size)
{
	extent_node_t key;

	assert(size == CHUNK_CEILING(size));

	extent_node_init(&key, arena, NULL, size, false, false);
	return (extent_tree_szad_nsearch(chunks_szad, &key));
}
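/*
 * Worked example (illustrative only): with free extents of 2, 3, 3, and 5
 * chunks in the size/address-ordered tree, a 3-chunk request returns the
 * lowest-addressed 3-chunk extent, because extent_tree_szad_nsearch() finds
 * the smallest extent that is large enough and breaks size ties by address.
 */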

static void *
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;
	extent_node_t *node;
	size_t alloc_size, leadsize, trailsize;
	bool committed, zeroed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the node linkage embedded in their headers, in
	 * which case dalloc_node is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_node || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	if (new_addr != NULL) {
		extent_node_t key;
		extent_node_init(&key, arena, new_addr, alloc_size, false,
		    false);
		node = extent_tree_ad_search(chunks_ad, &key);
	} else {
		node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
		    alloc_size);
	}
	if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
	    size)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
	    alignment) - (uintptr_t)extent_node_addr_get(node);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_node_size_get(node) >= leadsize + size);
	trailsize = extent_node_size_get(node) - leadsize - size;
	ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
	committed = extent_node_committed_get(node);
	zeroed = extent_node_zeroed_get(node);
	if (zeroed)
		*zero = true;
	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_node_addr_get(node),
	    extent_node_size_get(node), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		return (NULL);
	}
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	arena_chunk_cache_maybe_remove(arena, node, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_node_size_set(node, leadsize);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
		    trailsize, false, arena->ind)) {
			if (dalloc_node && node != NULL)
				arena_node_dalloc(arena, node);
			malloc_mutex_unlock(&arena->chunks_mtx);
			chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
			    cache, ret, size + trailsize, committed, zeroed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			node = arena_node_alloc(arena);
			if (node == NULL) {
				malloc_mutex_unlock(&arena->chunks_mtx);
				chunk_record(arena, chunk_hooks, chunks_szad,
				    chunks_ad, cache, ret, size + trailsize,
				    committed, zeroed);
				return (NULL);
			}
		}
		extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, committed, zeroed);
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
		node = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, arena->ind)) {
		malloc_mutex_unlock(&arena->chunks_mtx);
		chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
		    ret, size, committed, zeroed);
		return (NULL);
	}
	malloc_mutex_unlock(&arena->chunks_mtx);

	assert(dalloc_node || node != NULL);
	if (dalloc_node && node != NULL)
		arena_node_dalloc(arena, node);
	if (*zero) {
		if (!zeroed)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
    bool *zero, dss_prec_t dss_prec)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	/* Retained. */
	if ((ret = chunk_recycle(arena, &chunk_hooks,
	    &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
	    new_addr, size, alignment, zero, true)) != NULL)
		return (ret);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
		return (ret);
	/*
	 * mmap.  Requesting an address is not implemented for
	 * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
	 */
	if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero))
	    != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
		return (ret);

	/* All strategies for allocation failed. */
	return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero;

	/*
	 * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
	 * because it's critical that chunk_alloc_base() return untouched
	 * demand-zeroed virtual memory.
	 */
	zero = true;
	ret = chunk_alloc_mmap(size, chunksize, &zero);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}

void *
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
	    dalloc_node);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
	return (ret);
}
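
/* Map an arena index, as passed to chunk hooks, back to its arena_t. */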
static arena_t *
chunk_arena_get(unsigned arena_ind)
{
	arena_t *arena;

	/* Dodge tsd for a0 in order to avoid bootstrapping issues. */
	arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
	    false, true);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);
	return (arena);
}
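
/*
 * Default chunk allocation hook: allocate on behalf of the arena identified
 * by arena_ind, honoring that arena's dss precedence.
 */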
static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
	void *ret;
	arena_t *arena;

	arena = chunk_arena_get(arena_ind);
	ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
	    arena->dss_prec);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

	return (ret);
}
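
/*
 * Allocate through the arena's (possibly user-installed) alloc hook.  The
 * Valgrind annotation is skipped for the default hook, which already
 * annotates the memory it returns in chunk_alloc_default().
 */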
void *
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero)
{
	void *ret;

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	ret = chunk_hooks->alloc(new_addr, size, alignment, zero, arena->ind);
	if (ret == NULL)
		return (NULL);
	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
	return (ret);
}
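
/*
 * Record a chunk in the given extent trees (cached or retained), coalescing
 * it with adjacent extents whenever the merge hook permits.  Forward
 * coalescing is attempted first, then backward.
 */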
static void
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, bool committed, bool zeroed)
{
	bool unzeroed;
	extent_node_t *node, *prev;
	extent_node_t key;

	assert(!cache || !zeroed);
	unzeroed = cache || !zeroed;
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	malloc_mutex_lock(&arena->chunks_mtx);
	chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
	extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
	    false, false);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && extent_node_addr_get(node) ==
	    extent_node_addr_get(&key) && extent_node_committed_get(node) ==
	    committed && !chunk_hooks->merge(chunk, size,
	    extent_node_addr_get(node), extent_node_size_get(node), false,
	    arena->ind)) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, chunk);
		extent_node_size_set(node, size + extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
		    !unzeroed);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		node = arena_node_alloc(arena);
		if (node == NULL) {
			/*
			 * Node allocation failed, which is an exceedingly
			 * unlikely failure.  Leak chunk after making sure its
			 * pages have already been purged, so that this is only
			 * a virtual memory leak.
			 */
			if (cache) {
				chunk_purge_wrapper(arena, chunk_hooks, chunk,
				    size, 0, size);
			}
			goto label_return;
		}
		extent_node_init(node, arena, chunk, size, committed,
		    !unzeroed);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
	    extent_node_size_get(prev)) == chunk &&
	    extent_node_committed_get(prev) == committed &&
	    !chunk_hooks->merge(extent_node_addr_get(prev),
	    extent_node_size_get(prev), chunk, size, false, arena->ind)) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);
		arena_chunk_cache_maybe_remove(arena, prev, cache);
		extent_tree_szad_remove(chunks_szad, node);
		arena_chunk_cache_maybe_remove(arena, node, cache);
		extent_node_addr_set(node, extent_node_addr_get(prev));
		extent_node_size_set(node, extent_node_size_get(prev) +
		    extent_node_size_get(node));
		extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
		    extent_node_zeroed_get(node));
		extent_tree_szad_insert(chunks_szad, node);
		arena_chunk_cache_maybe_insert(arena, node, cache);

		arena_node_dalloc(arena, prev);
	}

label_return:
	malloc_mutex_unlock(&arena->chunks_mtx);
}
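
/*
 * Return a chunk to this arena's cache so that it can be recycled, then give
 * the arena an opportunity to purge dirty pages.
 */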
void
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
	    &arena->chunks_ad_cached, true, chunk, size, true, false);
	arena_maybe_purge(arena);
}
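
/*
 * Permanently discard a chunk: unmap it via the dalloc hook if possible;
 * otherwise decommit or purge it and record it in the retained trees for
 * later reuse.
 */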
void
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, bool zeroed)
{
	bool committed;

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	/* Try to deallocate. */
	if (!chunk_hooks->dalloc(chunk, size, arena->ind))
		return;
	/* Try to decommit; purge if that fails. */
	committed = chunk_hooks->decommit(chunk, size, arena->ind);
	zeroed = !committed || chunk_hooks->purge(chunk, size, 0, size,
	    arena->ind);
	chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
	    &arena->chunks_ad_retained, false, chunk, size, committed, zeroed);
}
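
/*
 * Default chunk deallocation hook.  dss memory cannot be unmapped, so report
 * opt-out (true) for dss chunks; everything else is handed to
 * chunk_dalloc_mmap().
 */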
static bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{

	if (!have_dss || !chunk_in_dss(chunk))
		return (chunk_dalloc_mmap(chunk, size));
	return (true);
}
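
/* Deallocate through the arena's (possibly user-installed) dalloc hook. */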
void
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	chunk_hooks->dalloc(chunk, size, arena->ind);
	if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
		JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
}
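
/*
 * Default commit/decommit hooks: thin wrappers around pages_commit() and
 * pages_decommit().
 */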
static bool
chunk_commit_default(void *chunk, size_t size, unsigned arena_ind)
{

	return (pages_commit(chunk, size));
}

static bool
chunk_decommit_default(void *chunk, size_t size, unsigned arena_ind)
{

	return (pages_decommit(chunk, size));
}
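
/* Purge a page-aligned range of pages within a chunk via pages_purge(). */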
bool
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
	    length));
}
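
/*
 * Default purge hook, plus the wrapper that routes purge requests through
 * the arena's (possibly user-installed) purge hook.
 */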
static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{

	return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
	    length));
}

bool
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
    size_t size, size_t offset, size_t length)
{

	chunk_hooks_assure_initialized(arena, chunk_hooks);
	return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
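
/*
 * Default split/merge hooks.  Both refuse (return true) when mappings cannot
 * coalesce, and the merge hook additionally refuses to combine dss and
 * non-dss chunks.
 */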
static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{

	if (!maps_coalesce)
		return (true);
	if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
		return (true);

	return (false);
}
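
/* Allocate rtree nodes for chunks_rtree from the base allocator. */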
static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{

	return ((rtree_node_elm_t *)base_alloc(nelms *
	    sizeof(rtree_node_elm_t)));
}
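
/*
 * One-time initialization: derive chunksize, chunksize_mask, and chunk_npages
 * from opt_lg_chunk, boot the dss allocator if available, and create
 * chunks_rtree.
 */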
bool
chunk_boot(void)
{
#ifdef _WIN32
	SYSTEM_INFO info;
	GetSystemInfo(&info);

	/*
	 * Verify actual page size is equal to or an integral multiple of
	 * configured page size.
	 */
	if (info.dwPageSize & ((1U << LG_PAGE) - 1))
		return (true);

	/*
	 * Configure chunksize (if not set) to match granularity (usually 64K),
	 * so pages_map will always take fast path.
	 */
	if (!opt_lg_chunk) {
		opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
		    - 1;
	}
#else
	if (!opt_lg_chunk)
		opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);

	if (have_dss && chunk_dss_boot())
		return (true);
	if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
	    opt_lg_chunk, chunks_rtree_node_alloc, NULL))
		return (true);

	return (false);
}
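
/* The fork handlers simply forward to their dss counterparts. */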
void
chunk_prefork(void)
{

	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
}