Generalize chunk management hooks.
Add the "arena.<i>.chunk_hooks" mallctl, which replaces and expands on the "arena.<i>.chunk.{alloc,dalloc,purge}" mallctls. The chunk hooks allow control over chunk allocation/deallocation, decommit/commit, purging, and splitting/merging, such that the application can rely on jemalloc's internal chunk caching and retaining functionality, yet implement a variety of chunk management mechanisms and policies. Merge the chunks_[sz]ad_{mmap,dss} red-black trees into chunks_[sz]ad_retained. This slightly reduces how hard jemalloc tries to honor the dss precedence setting; prior to this change the precedence setting was also consulted when recycling chunks. Fix chunk purging. Don't purge chunks in arena_purge_stashed(); instead deallocate them in arena_unstash_purged(), so that the dirty memory linkage remains valid until after the last time it is used. This resolves #176 and #201.
184 src/arena.c
@@ -516,23 +516,23 @@ static bool
|
||||
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
|
||||
{
|
||||
|
||||
extent_node_init(&chunk->node, arena, chunk, chunksize, zero);
|
||||
extent_node_init(&chunk->node, arena, chunk, chunksize, true, zero);
|
||||
extent_node_achunk_set(&chunk->node, true);
|
||||
return (chunk_register(chunk, &chunk->node));
|
||||
}
|
||||
|
||||
static arena_chunk_t *
|
||||
arena_chunk_alloc_internal_hard(arena_t *arena, bool *zero)
|
||||
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
bool *zero)
|
||||
{
|
||||
arena_chunk_t *chunk;
|
||||
chunk_alloc_t *chunk_alloc = arena->chunk_alloc;
|
||||
chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
|
||||
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_alloc, NULL,
|
||||
|
||||
chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
|
||||
chunksize, chunksize, zero);
|
||||
if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
|
||||
chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)chunk,
|
||||
chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
|
||||
chunksize);
|
||||
chunk = NULL;
|
||||
}
|
||||
@@ -545,19 +545,18 @@ static arena_chunk_t *
|
||||
arena_chunk_alloc_internal(arena_t *arena, bool *zero)
|
||||
{
|
||||
arena_chunk_t *chunk;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
|
||||
if (likely(arena->chunk_alloc == chunk_alloc_default)) {
|
||||
chunk = chunk_alloc_cache(arena, NULL, chunksize, chunksize,
|
||||
zero, true);
|
||||
if (chunk != NULL && arena_chunk_register(arena, chunk,
|
||||
*zero)) {
|
||||
chunk_dalloc_cache(arena, chunk, chunksize);
|
||||
return (NULL);
|
||||
}
|
||||
} else
|
||||
chunk = NULL;
|
||||
if (chunk == NULL)
|
||||
chunk = arena_chunk_alloc_internal_hard(arena, zero);
|
||||
chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
|
||||
chunksize, zero, true);
|
||||
if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
|
||||
chunk_dalloc_cache(arena, &chunk_hooks, chunk, chunksize);
|
||||
return (NULL);
|
||||
}
|
||||
if (chunk == NULL) {
|
||||
chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
|
||||
zero);
|
||||
}
|
||||
|
||||
if (config_stats && chunk != NULL) {
|
||||
arena->stats.mapped += chunksize;
|
||||
@@ -657,7 +656,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
|
||||
|
||||
if (arena->spare != NULL) {
|
||||
arena_chunk_t *spare = arena->spare;
|
||||
chunk_dalloc_t *chunk_dalloc;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
|
||||
arena->spare = chunk;
|
||||
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
|
||||
@@ -667,15 +666,8 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
|
||||
|
||||
chunk_deregister(spare, &spare->node);
|
||||
|
||||
chunk_dalloc = arena->chunk_dalloc;
|
||||
if (likely(chunk_dalloc == chunk_dalloc_default))
|
||||
chunk_dalloc_cache(arena, (void *)spare, chunksize);
|
||||
else {
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
chunk_dalloc_wrapper(arena, chunk_dalloc, (void *)spare,
|
||||
chunksize);
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
}
|
||||
chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
|
||||
chunksize);
|
||||
|
||||
if (config_stats) {
|
||||
arena->stats.mapped -= chunksize;
|
||||
@@ -781,12 +773,12 @@ arena_node_dalloc(arena_t *arena, extent_node_t *node)
|
||||
}
|
||||
|
||||
static void *
|
||||
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
|
||||
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
size_t usize, size_t alignment, bool *zero, size_t csize)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
ret = chunk_alloc_wrapper(arena, chunk_alloc, NULL, csize, alignment,
|
||||
ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
|
||||
zero);
|
||||
if (ret == NULL) {
|
||||
/* Revert optimistic stats updates. */
|
||||
@@ -807,7 +799,7 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
|
||||
bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
chunk_alloc_t *chunk_alloc;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
size_t csize = CHUNK_CEILING(usize);
|
||||
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
@@ -819,15 +811,11 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
|
||||
}
|
||||
arena->nactive += (usize >> LG_PAGE);
|
||||
|
||||
chunk_alloc = arena->chunk_alloc;
|
||||
if (likely(chunk_alloc == chunk_alloc_default)) {
|
||||
ret = chunk_alloc_cache(arena, NULL, csize, alignment, zero,
|
||||
true);
|
||||
} else
|
||||
ret = NULL;
|
||||
ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
|
||||
zero, true);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
if (ret == NULL) {
|
||||
ret = arena_chunk_alloc_huge_hard(arena, chunk_alloc, usize,
|
||||
ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
|
||||
alignment, zero, csize);
|
||||
}
|
||||
|
||||
@@ -839,12 +827,11 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
|
||||
void
|
||||
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
|
||||
{
|
||||
chunk_dalloc_t *chunk_dalloc;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
size_t csize;
|
||||
|
||||
csize = CHUNK_CEILING(usize);
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
chunk_dalloc = arena->chunk_dalloc;
|
||||
if (config_stats) {
|
||||
arena_huge_dalloc_stats_update(arena, usize);
|
||||
arena->stats.mapped -= usize;
|
||||
@@ -852,13 +839,8 @@ arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
|
||||
}
|
||||
arena->nactive -= (usize >> LG_PAGE);
|
||||
|
||||
if (likely(chunk_dalloc == chunk_dalloc_default)) {
|
||||
chunk_dalloc_cache(arena, chunk, csize);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
} else {
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
chunk_dalloc_wrapper(arena, chunk_dalloc, chunk, csize);
|
||||
}
|
||||
chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -904,30 +886,23 @@ arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
|
||||
arena->nactive -= udiff >> LG_PAGE;
|
||||
|
||||
if (cdiff != 0) {
|
||||
chunk_dalloc_t *chunk_dalloc = arena->chunk_dalloc;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
void *nchunk = (void *)((uintptr_t)chunk +
|
||||
CHUNK_CEILING(usize));
|
||||
|
||||
if (likely(chunk_dalloc == chunk_dalloc_default)) {
|
||||
chunk_dalloc_cache(arena, nchunk, cdiff);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
} else {
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
chunk_dalloc_wrapper(arena, chunk_dalloc, nchunk,
|
||||
cdiff);
|
||||
}
|
||||
} else
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff);
|
||||
}
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
}
|
||||
|
||||
bool
|
||||
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
|
||||
size_t oldsize, size_t usize, bool *zero, void *nchunk, size_t udiff,
|
||||
size_t cdiff)
|
||||
static bool
|
||||
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
|
||||
size_t udiff, size_t cdiff)
|
||||
{
|
||||
bool err;
|
||||
|
||||
err = (chunk_alloc_wrapper(arena, chunk_alloc, nchunk, cdiff, chunksize,
|
||||
err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
|
||||
zero) == NULL);
|
||||
if (err) {
|
||||
/* Revert optimistic stats updates. */
|
||||
@@ -939,6 +914,10 @@ arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_alloc_t *chunk_alloc,
|
||||
}
|
||||
arena->nactive -= (udiff >> LG_PAGE);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
|
||||
cdiff, true, arena->ind)) {
|
||||
chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero);
|
||||
err = true;
|
||||
}
|
||||
return (err);
|
||||
}
|
||||
@@ -948,11 +927,13 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
|
||||
size_t usize, bool *zero)
|
||||
{
|
||||
bool err;
|
||||
chunk_alloc_t *chunk_alloc;
|
||||
chunk_hooks_t chunk_hooks;
|
||||
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
|
||||
size_t udiff = usize - oldsize;
|
||||
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
|
||||
|
||||
chunk_hooks = chunk_hooks_get(arena);
|
||||
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
|
||||
/* Optimistically update stats. */
|
||||
@@ -962,16 +943,17 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
|
||||
}
|
||||
arena->nactive += (udiff >> LG_PAGE);
|
||||
|
||||
chunk_alloc = arena->chunk_alloc;
|
||||
if (likely(chunk_alloc == chunk_alloc_default)) {
|
||||
err = (chunk_alloc_cache(arena, nchunk, cdiff, chunksize, zero,
|
||||
true) == NULL);
|
||||
} else
|
||||
err = true;
|
||||
err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
|
||||
chunksize, zero, true) == NULL);
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
if (err) {
|
||||
err = arena_chunk_ralloc_huge_expand_hard(arena, chunk_alloc,
|
||||
oldsize, usize, zero, nchunk, udiff, cdiff);
|
||||
err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
|
||||
chunk, oldsize, usize, zero, nchunk, udiff,
|
||||
cdiff);
|
||||
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
|
||||
cdiff, true, arena->ind)) {
|
||||
chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero);
|
||||
err = true;
|
||||
}
|
||||
|
||||
if (config_stats && !err)
|
||||
@@ -1198,8 +1180,8 @@ arena_compute_npurge(arena_t *arena, bool all)
|
||||
}
|
||||
|
||||
static size_t
|
||||
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
|
||||
arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks, bool all,
|
||||
size_t npurge, arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||
extent_node_t *purge_chunks_sentinel)
|
||||
{
|
||||
arena_runs_dirty_link_t *rdelm, *rdelm_next;
|
||||
@@ -1224,7 +1206,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
|
||||
* dalloc_node=false argument to chunk_alloc_cache().
|
||||
*/
|
||||
zero = false;
|
||||
chunk = chunk_alloc_cache(arena,
|
||||
chunk = chunk_alloc_cache(arena, chunk_hooks,
|
||||
extent_node_addr_get(chunkselm),
|
||||
extent_node_size_get(chunkselm), chunksize, &zero,
|
||||
false);
|
||||
@@ -1278,12 +1260,11 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
|
||||
}
|
||||
|
||||
static size_t
|
||||
arena_purge_stashed(arena_t *arena,
|
||||
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||
extent_node_t *purge_chunks_sentinel)
|
||||
{
|
||||
size_t npurged, nmadvise;
|
||||
chunk_purge_t *chunk_purge;
|
||||
arena_runs_dirty_link_t *rdelm;
|
||||
extent_node_t *chunkselm;
|
||||
|
||||
@@ -1291,7 +1272,6 @@ arena_purge_stashed(arena_t *arena,
|
||||
nmadvise = 0;
|
||||
npurged = 0;
|
||||
|
||||
chunk_purge = arena->chunk_purge;
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
|
||||
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
|
||||
@@ -1299,13 +1279,16 @@ arena_purge_stashed(arena_t *arena,
|
||||
size_t npages;
|
||||
|
||||
if (rdelm == &chunkselm->rd) {
|
||||
/*
|
||||
* Don't actually purge the chunk here because 1)
|
||||
* chunkselm is embedded in the chunk and must remain
|
||||
* valid, and 2) we deallocate the chunk in
|
||||
* arena_unstash_purged(), where it is destroyed,
|
||||
* decommitted, or purged, depending on chunk
|
||||
* deallocation policy.
|
||||
*/
|
||||
size_t size = extent_node_size_get(chunkselm);
|
||||
bool unzeroed;
|
||||
|
||||
npages = size >> LG_PAGE;
|
||||
unzeroed = chunk_purge_wrapper(arena, chunk_purge,
|
||||
extent_node_addr_get(chunkselm), 0, size);
|
||||
extent_node_zeroed_set(chunkselm, !unzeroed);
|
||||
chunkselm = qr_next(chunkselm, cc_link);
|
||||
} else {
|
||||
size_t pageind, run_size, flag_unzeroed, i;
|
||||
@@ -1319,8 +1302,9 @@ arena_purge_stashed(arena_t *arena,
|
||||
npages = run_size >> LG_PAGE;
|
||||
|
||||
assert(pageind + npages <= chunk_npages);
|
||||
unzeroed = chunk_purge_wrapper(arena, chunk_purge,
|
||||
chunk, pageind << LG_PAGE, run_size);
|
||||
unzeroed = chunk_purge_wrapper(arena,
|
||||
chunk_hooks, chunk, chunksize, pageind << LG_PAGE,
|
||||
run_size);
|
||||
flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
|
||||
|
||||
/*
|
||||
@@ -1355,14 +1339,14 @@ arena_purge_stashed(arena_t *arena,
|
||||
}
|
||||
|
||||
static void
|
||||
arena_unstash_purged(arena_t *arena,
|
||||
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
arena_runs_dirty_link_t *purge_runs_sentinel,
|
||||
extent_node_t *purge_chunks_sentinel)
|
||||
{
|
||||
arena_runs_dirty_link_t *rdelm, *rdelm_next;
|
||||
extent_node_t *chunkselm;
|
||||
|
||||
/* Deallocate runs. */
|
||||
/* Deallocate chunks/runs. */
|
||||
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
|
||||
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
|
||||
rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
|
||||
@@ -1376,7 +1360,8 @@ arena_unstash_purged(arena_t *arena,
|
||||
extent_node_dirty_remove(chunkselm);
|
||||
arena_node_dalloc(arena, chunkselm);
|
||||
chunkselm = chunkselm_next;
|
||||
chunk_dalloc_arena(arena, addr, size, zeroed);
|
||||
chunk_dalloc_arena(arena, chunk_hooks, addr, size,
|
||||
zeroed);
|
||||
} else {
|
||||
arena_chunk_map_misc_t *miscelm =
|
||||
arena_rd_to_miscelm(rdelm);
|
||||
@@ -1390,6 +1375,7 @@ arena_unstash_purged(arena_t *arena,
|
||||
static void
|
||||
arena_purge(arena_t *arena, bool all)
|
||||
{
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
size_t npurge, npurgeable, npurged;
|
||||
arena_runs_dirty_link_t purge_runs_sentinel;
|
||||
extent_node_t purge_chunks_sentinel;
|
||||
@@ -1413,13 +1399,13 @@ arena_purge(arena_t *arena, bool all)
|
||||
qr_new(&purge_runs_sentinel, rd_link);
|
||||
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
|
||||
|
||||
npurgeable = arena_stash_dirty(arena, all, npurge, &purge_runs_sentinel,
|
||||
&purge_chunks_sentinel);
|
||||
npurgeable = arena_stash_dirty(arena, &chunk_hooks, all, npurge,
|
||||
&purge_runs_sentinel, &purge_chunks_sentinel);
|
||||
assert(npurgeable >= npurge);
|
||||
npurged = arena_purge_stashed(arena, &purge_runs_sentinel,
|
||||
npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
|
||||
&purge_chunks_sentinel);
|
||||
assert(npurged == npurgeable);
|
||||
arena_unstash_purged(arena, &purge_runs_sentinel,
|
||||
arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
|
||||
&purge_chunks_sentinel);
|
||||
|
||||
arena->purging = false;
|
||||
@@ -2874,21 +2860,17 @@ arena_new(unsigned ind)
|
||||
if (malloc_mutex_init(&arena->huge_mtx))
|
||||
return (NULL);
|
||||
|
||||
extent_tree_szad_new(&arena->chunks_szad_cache);
|
||||
extent_tree_ad_new(&arena->chunks_ad_cache);
|
||||
extent_tree_szad_new(&arena->chunks_szad_mmap);
|
||||
extent_tree_ad_new(&arena->chunks_ad_mmap);
|
||||
extent_tree_szad_new(&arena->chunks_szad_dss);
|
||||
extent_tree_ad_new(&arena->chunks_ad_dss);
|
||||
extent_tree_szad_new(&arena->chunks_szad_cached);
|
||||
extent_tree_ad_new(&arena->chunks_ad_cached);
|
||||
extent_tree_szad_new(&arena->chunks_szad_retained);
|
||||
extent_tree_ad_new(&arena->chunks_ad_retained);
|
||||
if (malloc_mutex_init(&arena->chunks_mtx))
|
||||
return (NULL);
|
||||
ql_new(&arena->node_cache);
|
||||
if (malloc_mutex_init(&arena->node_cache_mtx))
|
||||
return (NULL);
|
||||
|
||||
arena->chunk_alloc = chunk_alloc_default;
|
||||
arena->chunk_dalloc = chunk_dalloc_default;
|
||||
arena->chunk_purge = chunk_purge_default;
|
||||
arena->chunk_hooks = chunk_hooks_default;
|
||||
|
||||
/* Initialize bins. */
|
||||
for (i = 0; i < NBINS; i++) {
|
||||
src/base.c
@@ -66,7 +66,7 @@ base_chunk_alloc(size_t minsize)
|
||||
base_resident += PAGE_CEILING(nsize);
|
||||
}
|
||||
}
|
||||
extent_node_init(node, NULL, addr, csize, true);
|
||||
extent_node_init(node, NULL, addr, csize, true, true);
|
||||
return (node);
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ base_alloc(size_t size)
|
||||
csize = CACHELINE_CEILING(size);
|
||||
|
||||
usize = s2u(csize);
|
||||
extent_node_init(&key, NULL, NULL, usize, false);
|
||||
extent_node_init(&key, NULL, NULL, usize, true, false);
|
||||
malloc_mutex_lock(&base_mtx);
|
||||
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
|
||||
if (node != NULL) {
|
||||
|
346 src/chunk.c
@@ -18,7 +18,103 @@ size_t chunksize;
|
||||
size_t chunksize_mask; /* (chunksize - 1). */
|
||||
size_t chunk_npages;
|
||||
|
||||
static void *chunk_alloc_default(void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, unsigned arena_ind);
|
||||
static bool chunk_dalloc_default(void *chunk, size_t size,
|
||||
unsigned arena_ind);
|
||||
static bool chunk_commit_default(void *chunk, size_t size,
|
||||
unsigned arena_ind);
|
||||
static bool chunk_decommit_default(void *chunk, size_t size,
|
||||
unsigned arena_ind);
|
||||
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
|
||||
size_t length, unsigned arena_ind);
|
||||
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
|
||||
size_t size_b, bool committed, unsigned arena_ind);
|
||||
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
|
||||
size_t size_b, bool committed, unsigned arena_ind);
|
||||
|
||||
const chunk_hooks_t chunk_hooks_default = {
|
||||
chunk_alloc_default,
|
||||
chunk_dalloc_default,
|
||||
chunk_commit_default,
|
||||
chunk_decommit_default,
|
||||
chunk_purge_default,
|
||||
chunk_split_default,
|
||||
chunk_merge_default
|
||||
};
|
||||
|
||||
/******************************************************************************/
|
||||
/*
|
||||
* Function prototypes for static functions that are referenced prior to
|
||||
* definition.
|
||||
*/
|
||||
|
||||
static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *chunk, size_t size, bool committed, bool zeroed);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static chunk_hooks_t
|
||||
chunk_hooks_get_locked(arena_t *arena)
|
||||
{
|
||||
|
||||
return (arena->chunk_hooks);
|
||||
}
|
||||
|
||||
chunk_hooks_t
|
||||
chunk_hooks_get(arena_t *arena)
|
||||
{
|
||||
chunk_hooks_t chunk_hooks;
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
chunk_hooks = chunk_hooks_get_locked(arena);
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
|
||||
return (chunk_hooks);
|
||||
}
|
||||
|
||||
chunk_hooks_t
|
||||
chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
|
||||
{
|
||||
chunk_hooks_t old_chunk_hooks;
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
old_chunk_hooks = arena->chunk_hooks;
|
||||
arena->chunk_hooks = *chunk_hooks;
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
|
||||
return (old_chunk_hooks);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
bool locked)
|
||||
{
|
||||
static const chunk_hooks_t uninitialized_hooks =
|
||||
CHUNK_HOOKS_INITIALIZER;
|
||||
|
||||
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
|
||||
0) {
|
||||
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
|
||||
chunk_hooks_get(arena);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_hooks_assure_initialized_locked(arena_t *arena,
|
||||
chunk_hooks_t *chunk_hooks)
|
||||
{
|
||||
|
||||
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
|
||||
{
|
||||
|
||||
chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_register(const void *chunk, const extent_node_t *node)
|
||||
@@ -74,21 +170,26 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
|
||||
assert(size == CHUNK_CEILING(size));
|
||||
|
||||
extent_node_init(&key, arena, NULL, size, false);
|
||||
extent_node_init(&key, arena, NULL, size, false, false);
|
||||
return (extent_tree_szad_nsearch(chunks_szad, &key));
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, bool cache, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero, bool dalloc_node)
|
||||
chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
|
||||
{
|
||||
void *ret;
|
||||
extent_node_t *node;
|
||||
size_t alloc_size, leadsize, trailsize;
|
||||
bool zeroed;
|
||||
bool committed, zeroed;
|
||||
|
||||
assert(new_addr == NULL || alignment == chunksize);
|
||||
/*
|
||||
* Cached chunks use the node linkage embedded in their headers, in
|
||||
* which case dalloc_node is true, and new_addr is non-NULL because
|
||||
* we're operating on a specific chunk.
|
||||
*/
|
||||
assert(dalloc_node || new_addr != NULL);
|
||||
|
||||
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
|
||||
@@ -96,9 +197,11 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
if (alloc_size < size)
|
||||
return (NULL);
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
|
||||
if (new_addr != NULL) {
|
||||
extent_node_t key;
|
||||
extent_node_init(&key, arena, new_addr, alloc_size, false);
|
||||
extent_node_init(&key, arena, new_addr, alloc_size, false,
|
||||
false);
|
||||
node = extent_tree_ad_search(chunks_ad, &key);
|
||||
} else {
|
||||
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
|
||||
@@ -115,9 +218,17 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
assert(extent_node_size_get(node) >= leadsize + size);
|
||||
trailsize = extent_node_size_get(node) - leadsize - size;
|
||||
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
|
||||
committed = extent_node_committed_get(node);
|
||||
zeroed = extent_node_zeroed_get(node);
|
||||
if (zeroed)
|
||||
*zero = true;
|
||||
/* Split the lead. */
|
||||
if (leadsize != 0 &&
|
||||
chunk_hooks->split(extent_node_addr_get(node),
|
||||
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
/* Remove node from the tree. */
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_ad_remove(chunks_ad, node);
|
||||
@@ -131,23 +242,40 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
node = NULL;
|
||||
}
|
||||
if (trailsize != 0) {
|
||||
/* Split the trail. */
|
||||
if (chunk_hooks->split(ret, size + trailsize, size,
|
||||
trailsize, false, arena->ind)) {
|
||||
if (dalloc_node && node != NULL)
|
||||
arena_node_dalloc(arena, node);
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
|
||||
cache, ret, size + trailsize, committed, zeroed);
|
||||
return (NULL);
|
||||
}
|
||||
/* Insert the trailing space as a smaller chunk. */
|
||||
if (node == NULL) {
|
||||
node = arena_node_alloc(arena);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_record(arena, chunks_szad, chunks_ad,
|
||||
cache, ret, size, zeroed);
|
||||
chunk_record(arena, chunk_hooks, chunks_szad,
|
||||
chunks_ad, cache, ret, size + trailsize,
|
||||
committed, zeroed);
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
|
||||
trailsize, zeroed);
|
||||
trailsize, committed, zeroed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
node = NULL;
|
||||
}
|
||||
if (!committed && chunk_hooks->commit(ret, size, arena->ind)) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
|
||||
ret, size, committed, zeroed);
|
||||
return (NULL);
|
||||
}
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
|
||||
assert(dalloc_node || node != NULL);
|
||||
@@ -168,20 +296,6 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
|
||||
&arena->chunks_ad_dss, false, new_addr, size, alignment, zero,
|
||||
true)) != NULL)
|
||||
return (ret);
|
||||
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* If the caller specifies (!*zero), it is still possible to receive zeroed
|
||||
* memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
|
||||
@@ -193,33 +307,33 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero, dss_prec_t dss_prec)
|
||||
{
|
||||
void *ret;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
/* Retained. */
|
||||
if ((ret = chunk_recycle(arena, &chunk_hooks,
|
||||
&arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
|
||||
new_addr, size, alignment, zero, true)) != NULL)
|
||||
return (ret);
|
||||
|
||||
/* "primary" dss. */
|
||||
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
||||
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
||||
NULL)
|
||||
return (ret);
|
||||
/* mmap. */
|
||||
if (!config_munmap && (ret = chunk_recycle(arena,
|
||||
&arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
|
||||
size, alignment, zero, true)) != NULL)
|
||||
chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
|
||||
return (ret);
|
||||
/*
|
||||
* Requesting an address is not implemented for chunk_alloc_mmap(), so
|
||||
* only call it if (new_addr == NULL).
|
||||
* mmap. Requesting an address is not implemented for
|
||||
* chunk_alloc_mmap(), so only call it if (new_addr == NULL).
|
||||
*/
|
||||
if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero))
|
||||
!= NULL)
|
||||
return (ret);
|
||||
/* "secondary" dss. */
|
||||
if (have_dss && dss_prec == dss_prec_secondary && (ret =
|
||||
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
||||
NULL)
|
||||
chunk_alloc_dss(arena, new_addr, size, alignment, zero)) != NULL)
|
||||
return (ret);
|
||||
|
||||
/* All strategies for allocation failed. */
|
||||
@@ -248,8 +362,8 @@ chunk_alloc_base(size_t size)
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero, bool dalloc_node)
|
||||
chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero, bool dalloc_node)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
@@ -258,8 +372,8 @@ chunk_alloc_cache(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
ret = chunk_recycle(arena, &arena->chunks_szad_cache,
|
||||
&arena->chunks_ad_cache, true, new_addr, size, alignment, zero,
|
||||
ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
|
||||
&arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
|
||||
dalloc_node);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
@@ -285,11 +399,13 @@ chunk_arena_get(unsigned arena_ind)
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero)
|
||||
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
void *ret;
|
||||
arena_t *arena;
|
||||
|
||||
arena = chunk_arena_get(arena_ind);
|
||||
ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
|
||||
arena->dss_prec);
|
||||
if (ret == NULL)
|
||||
@@ -300,55 +416,45 @@ chunk_alloc_arena(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Default arena chunk allocation routine in the absence of user override. This
|
||||
* function isn't actually used by jemalloc, but it does the right thing if the
|
||||
* application passes calls through to it during chunk allocation.
|
||||
*/
|
||||
void *
|
||||
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
arena_t *arena;
|
||||
|
||||
arena = chunk_arena_get(arena_ind);
|
||||
return (chunk_alloc_arena(arena, new_addr, size, alignment, zero));
|
||||
}
|
||||
|
||||
void *
|
||||
chunk_alloc_wrapper(arena_t *arena, chunk_alloc_t *chunk_alloc, void *new_addr,
|
||||
chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
|
||||
size_t size, size_t alignment, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
ret = chunk_alloc(new_addr, size, alignment, zero, arena->ind);
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
ret = chunk_hooks->alloc(new_addr, size, alignment, zero, arena->ind);
|
||||
if (ret == NULL)
|
||||
return (NULL);
|
||||
if (config_valgrind && chunk_alloc != chunk_alloc_default)
|
||||
if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed)
|
||||
static void
|
||||
chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
|
||||
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
|
||||
void *chunk, size_t size, bool committed, bool zeroed)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *node, *prev;
|
||||
extent_node_t key;
|
||||
|
||||
assert(maps_coalesce || size == chunksize);
|
||||
assert(!cache || !zeroed);
|
||||
unzeroed = cache || !zeroed;
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
|
||||
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
|
||||
false);
|
||||
false, false);
|
||||
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
||||
/* Try to coalesce forward. */
|
||||
if (node != NULL && extent_node_addr_get(node) ==
|
||||
extent_node_addr_get(&key)) {
|
||||
extent_node_addr_get(&key) && extent_node_committed_get(node) ==
|
||||
committed && !chunk_hooks->merge(chunk, size,
|
||||
extent_node_addr_get(node), extent_node_size_get(node), false,
|
||||
arena->ind)) {
|
||||
/*
|
||||
* Coalesce chunk with the following address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
@@ -373,12 +479,13 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
* a virtual memory leak.
|
||||
*/
|
||||
if (cache) {
|
||||
chunk_purge_wrapper(arena, arena->chunk_purge,
|
||||
chunk, 0, size);
|
||||
chunk_purge_wrapper(arena, chunk_hooks, chunk,
|
||||
size, 0, size);
|
||||
}
|
||||
goto label_return;
|
||||
}
|
||||
extent_node_init(node, arena, chunk, size, !unzeroed);
|
||||
extent_node_init(node, arena, chunk, size, committed,
|
||||
!unzeroed);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
arena_chunk_cache_maybe_insert(arena, node, cache);
|
||||
@@ -387,7 +494,10 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
/* Try to coalesce backward. */
|
||||
prev = extent_tree_ad_prev(chunks_ad, node);
|
||||
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
|
||||
extent_node_size_get(prev)) == chunk) {
|
||||
extent_node_size_get(prev)) == chunk &&
|
||||
extent_node_committed_get(prev) == committed &&
|
||||
!chunk_hooks->merge(extent_node_addr_get(prev),
|
||||
extent_node_size_get(prev), chunk, size, false, arena->ind)) {
|
||||
/*
|
||||
* Coalesce chunk with the previous address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
@@ -414,7 +524,8 @@ label_return:
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
|
||||
chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size)
|
||||
{
|
||||
|
||||
assert(chunk != NULL);
|
||||
@@ -422,57 +533,68 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (!maps_coalesce && size != chunksize) {
|
||||
chunk_dalloc_arena(arena, chunk, size, false);
|
||||
return;
|
||||
}
|
||||
|
||||
chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
|
||||
true, chunk, size, false);
|
||||
chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
|
||||
&arena->chunks_ad_cached, true, chunk, size, true, false);
|
||||
arena_maybe_purge(arena);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dalloc_arena(arena_t *arena, void *chunk, size_t size, bool zeroed)
|
||||
chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size, bool zeroed)
|
||||
{
|
||||
bool committed;
|
||||
|
||||
assert(chunk != NULL);
|
||||
assert(CHUNK_ADDR2BASE(chunk) == chunk);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
if (have_dss && chunk_in_dss(chunk)) {
|
||||
chunk_record(arena, &arena->chunks_szad_dss,
|
||||
&arena->chunks_ad_dss, false, chunk, size, zeroed);
|
||||
} else if (chunk_dalloc_mmap(chunk, size)) {
|
||||
chunk_record(arena, &arena->chunks_szad_mmap,
|
||||
&arena->chunks_ad_mmap, false, chunk, size, zeroed);
|
||||
}
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
/* Try to deallocate. */
|
||||
if (!chunk_hooks->dalloc(chunk, size, arena->ind))
|
||||
return;
|
||||
/* Try to decommit; purge if that fails. */
|
||||
committed = chunk_hooks->decommit(chunk, size, arena->ind);
|
||||
zeroed = !committed || chunk_hooks->purge(chunk, size, 0, size,
|
||||
arena->ind);
|
||||
chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
|
||||
&arena->chunks_ad_retained, false, chunk, size, committed, zeroed);
|
||||
}
|
||||
|
||||
/*
|
||||
* Default arena chunk deallocation routine in the absence of user override.
|
||||
* This function isn't actually used by jemalloc, but it does the right thing if
|
||||
* the application passes calls through to it during chunk deallocation.
|
||||
*/
|
||||
bool
|
||||
static bool
|
||||
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
|
||||
{
|
||||
|
||||
chunk_dalloc_arena(chunk_arena_get(arena_ind), chunk, size, false);
|
||||
return (false);
|
||||
if (!have_dss || !chunk_in_dss(chunk))
|
||||
return (chunk_dalloc_mmap(chunk, size));
|
||||
return (true);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_dalloc_wrapper(arena_t *arena, chunk_dalloc_t *chunk_dalloc, void *chunk,
|
||||
chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size)
|
||||
{
|
||||
|
||||
chunk_dalloc(chunk, size, arena->ind);
|
||||
if (config_valgrind && chunk_dalloc != chunk_dalloc_default)
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
chunk_hooks->dalloc(chunk, size, arena->ind);
|
||||
if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_commit_default(void *chunk, size_t size, unsigned arena_ind)
|
||||
{
|
||||
|
||||
return (pages_commit(chunk, size));
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_decommit_default(void *chunk, size_t size, unsigned arena_ind)
|
||||
{
|
||||
|
||||
return (pages_decommit(chunk, size));
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
|
||||
{
|
||||
@@ -487,8 +609,8 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
|
||||
length));
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_purge_default(void *chunk, size_t offset, size_t length,
|
||||
static bool
|
||||
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
|
||||
unsigned arena_ind)
|
||||
{
|
||||
|
||||
@@ -497,11 +619,35 @@ chunk_purge_default(void *chunk, size_t offset, size_t length,
|
||||
}
|
||||
|
||||
bool
|
||||
chunk_purge_wrapper(arena_t *arena, chunk_purge_t *chunk_purge, void *chunk,
|
||||
size_t offset, size_t length)
|
||||
chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
|
||||
size_t size, size_t offset, size_t length)
|
||||
{
|
||||
|
||||
return (chunk_purge(chunk, offset, length, arena->ind));
|
||||
chunk_hooks_assure_initialized(arena, chunk_hooks);
|
||||
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
|
||||
bool committed, unsigned arena_ind)
|
||||
{
|
||||
|
||||
if (!maps_coalesce)
|
||||
return (true);
|
||||
return (false);
|
||||
}
|
||||
|
||||
static bool
|
||||
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
|
||||
bool committed, unsigned arena_ind)
|
||||
{
|
||||
|
||||
if (!maps_coalesce)
|
||||
return (true);
|
||||
if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
|
||||
return (true);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
src/chunk_dss.c
@@ -134,10 +134,10 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
dss_max = dss_next;
|
||||
malloc_mutex_unlock(&dss_mtx);
|
||||
if (cpad_size != 0) {
|
||||
chunk_record(arena,
|
||||
&arena->chunks_szad_dss,
|
||||
&arena->chunks_ad_dss, false, cpad,
|
||||
cpad_size, false);
|
||||
chunk_hooks_t chunk_hooks =
|
||||
CHUNK_HOOKS_INITIALIZER;
|
||||
chunk_dalloc_wrapper(arena,
|
||||
&chunk_hooks, cpad, cpad_size);
|
||||
}
|
||||
if (*zero) {
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
|
||||
|
131 src/chunk_mmap.c
@@ -2,137 +2,6 @@
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/******************************************************************************/
|
||||
/* Function prototypes for non-inline static functions. */
|
||||
|
||||
static void *pages_map(void *addr, size_t size);
|
||||
static void pages_unmap(void *addr, size_t size);
|
||||
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
|
||||
bool *zero);
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
static void *
|
||||
pages_map(void *addr, size_t size)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
assert(size != 0);
|
||||
|
||||
#ifdef _WIN32
|
||||
/*
|
||||
* If VirtualAlloc can't allocate at the given address when one is
|
||||
* given, it fails and returns NULL.
|
||||
*/
|
||||
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
|
||||
PAGE_READWRITE);
|
||||
#else
|
||||
/*
|
||||
* We don't use MAP_FIXED here, because it can cause the *replacement*
|
||||
* of existing mappings, and we only want to create new mappings.
|
||||
*/
|
||||
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
|
||||
-1, 0);
|
||||
assert(ret != NULL);
|
||||
|
||||
if (ret == MAP_FAILED)
|
||||
ret = NULL;
|
||||
else if (addr != NULL && ret != addr) {
|
||||
/*
|
||||
* We succeeded in mapping memory, but not in the right place.
|
||||
*/
|
||||
pages_unmap(ret, size);
|
||||
ret = NULL;
|
||||
}
|
||||
#endif
|
||||
assert(ret == NULL || (addr == NULL && ret != addr)
|
||||
|| (addr != NULL && ret == addr));
|
||||
return (ret);
|
||||
}
|
||||
|
||||
static void
|
||||
pages_unmap(void *addr, size_t size)
|
||||
{
|
||||
|
||||
#ifdef _WIN32
|
||||
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
|
||||
#else
|
||||
if (munmap(addr, size) == -1)
|
||||
#endif
|
||||
{
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc>: Error in "
|
||||
#ifdef _WIN32
|
||||
"VirtualFree"
|
||||
#else
|
||||
"munmap"
|
||||
#endif
|
||||
"(): %s\n", buf);
|
||||
if (opt_abort)
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void *
|
||||
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
|
||||
{
|
||||
void *ret = (void *)((uintptr_t)addr + leadsize);
|
||||
|
||||
assert(alloc_size >= leadsize + size);
|
||||
#ifdef _WIN32
|
||||
{
|
||||
void *new_addr;
|
||||
|
||||
pages_unmap(addr, alloc_size);
|
||||
new_addr = pages_map(ret, size);
|
||||
if (new_addr == ret)
|
||||
return (ret);
|
||||
if (new_addr)
|
||||
pages_unmap(new_addr, size);
|
||||
return (NULL);
|
||||
}
|
||||
#else
|
||||
{
|
||||
size_t trailsize = alloc_size - leadsize - size;
|
||||
|
||||
if (leadsize != 0)
|
||||
pages_unmap(addr, leadsize);
|
||||
if (trailsize != 0)
|
||||
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
|
||||
return (ret);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
bool
|
||||
pages_purge(void *addr, size_t length)
|
||||
{
|
||||
bool unzeroed;
|
||||
|
||||
#ifdef _WIN32
|
||||
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
|
||||
unzeroed = true;
|
||||
#elif defined(JEMALLOC_HAVE_MADVISE)
|
||||
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
|
||||
# define JEMALLOC_MADV_ZEROS true
|
||||
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||
# define JEMALLOC_MADV_PURGE MADV_FREE
|
||||
# define JEMALLOC_MADV_ZEROS false
|
||||
# else
|
||||
# error "No madvise(2) flag defined for purging unused dirty pages."
|
||||
# endif
|
||||
int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
|
||||
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
|
||||
# undef JEMALLOC_MADV_PURGE
|
||||
# undef JEMALLOC_MADV_ZEROS
|
||||
#else
|
||||
/* Last resort no-op. */
|
||||
unzeroed = true;
|
||||
#endif
|
||||
return (unzeroed);
|
||||
}
|
||||
|
||||
static void *
|
||||
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
|
||||
|
75 src/ctl.c
@@ -118,9 +118,7 @@ CTL_PROTO(arena_i_purge)
|
||||
static void arena_purge(unsigned arena_ind);
|
||||
CTL_PROTO(arena_i_dss)
|
||||
CTL_PROTO(arena_i_lg_dirty_mult)
|
||||
CTL_PROTO(arena_i_chunk_alloc)
|
||||
CTL_PROTO(arena_i_chunk_dalloc)
|
||||
CTL_PROTO(arena_i_chunk_purge)
|
||||
CTL_PROTO(arena_i_chunk_hooks)
|
||||
INDEX_PROTO(arena_i)
|
||||
CTL_PROTO(arenas_bin_i_size)
|
||||
CTL_PROTO(arenas_bin_i_nregs)
|
||||
@@ -288,17 +286,11 @@ static const ctl_named_node_t tcache_node[] = {
|
||||
{NAME("destroy"), CTL(tcache_destroy)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t chunk_node[] = {
|
||||
{NAME("alloc"), CTL(arena_i_chunk_alloc)},
|
||||
{NAME("dalloc"), CTL(arena_i_chunk_dalloc)},
|
||||
{NAME("purge"), CTL(arena_i_chunk_purge)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t arena_i_node[] = {
|
||||
{NAME("purge"), CTL(arena_i_purge)},
|
||||
{NAME("dss"), CTL(arena_i_dss)},
|
||||
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
|
||||
{NAME("chunk"), CHILD(named, chunk)},
|
||||
{NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
|
||||
};
|
||||
static const ctl_named_node_t super_arena_i_node[] = {
|
||||
{NAME(""), CHILD(named, arena_i)}
|
||||
@@ -1064,8 +1056,8 @@ ctl_postfork_child(void)
|
||||
memcpy(oldp, (void *)&(v), copylen); \
|
||||
ret = EINVAL; \
|
||||
goto label_return; \
|
||||
} else \
|
||||
*(t *)oldp = (v); \
|
||||
} \
|
||||
*(t *)oldp = (v); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
@@ -1682,37 +1674,36 @@ label_return:
|
||||
return (ret);
|
||||
}
|
||||
|
||||
#define CHUNK_FUNC(n) \
|
||||
static int \
|
||||
arena_i_chunk_##n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
|
||||
size_t *oldlenp, void *newp, size_t newlen) \
|
||||
{ \
|
||||
\
|
||||
int ret; \
|
||||
unsigned arena_ind = mib[1]; \
|
||||
arena_t *arena; \
|
||||
\
|
||||
malloc_mutex_lock(&ctl_mtx); \
|
||||
if (arena_ind < narenas_total_get() && (arena = \
|
||||
arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) { \
|
||||
malloc_mutex_lock(&arena->lock); \
|
||||
READ(arena->chunk_##n, chunk_##n##_t *); \
|
||||
WRITE(arena->chunk_##n, chunk_##n##_t *); \
|
||||
} else { \
|
||||
ret = EFAULT; \
|
||||
goto label_outer_return; \
|
||||
} \
|
||||
ret = 0; \
|
||||
label_return: \
|
||||
malloc_mutex_unlock(&arena->lock); \
|
||||
label_outer_return: \
|
||||
malloc_mutex_unlock(&ctl_mtx); \
|
||||
return (ret); \
|
||||
static int
|
||||
arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||
size_t *oldlenp, void *newp, size_t newlen)
|
||||
{
|
||||
int ret;
|
||||
unsigned arena_ind = mib[1];
|
||||
arena_t *arena;
|
||||
|
||||
malloc_mutex_lock(&ctl_mtx);
|
||||
if (arena_ind < narenas_total_get() && (arena =
|
||||
arena_get(tsd_fetch(), arena_ind, false, true)) != NULL) {
|
||||
if (newp != NULL) {
|
||||
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
|
||||
WRITE(new_chunk_hooks, chunk_hooks_t);
|
||||
old_chunk_hooks = chunk_hooks_set(arena,
|
||||
&new_chunk_hooks);
|
||||
READ(old_chunk_hooks, chunk_hooks_t);
|
||||
} else {
|
||||
chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
|
||||
READ(old_chunk_hooks, chunk_hooks_t);
|
||||
}
|
||||
} else {
|
||||
ret = EFAULT;
|
||||
goto label_return;
|
||||
}
|
||||
ret = 0;
|
||||
label_return:
|
||||
malloc_mutex_unlock(&ctl_mtx);
|
||||
return (ret);
|
||||
}
|
||||
CHUNK_FUNC(alloc)
|
||||
CHUNK_FUNC(dalloc)
|
||||
CHUNK_FUNC(purge)
|
||||
#undef CHUNK_FUNC
|
||||
|
||||
static const ctl_named_node_t *
|
||||
arena_i_index(const size_t *mib, size_t miblen, size_t i)
|
||||
|
44 src/huge.c
@@ -79,7 +79,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
extent_node_init(node, arena, ret, size, is_zeroed);
|
||||
extent_node_init(node, arena, ret, size, true, is_zeroed);
|
||||
|
||||
if (huge_node_set(ret, node)) {
|
||||
arena_chunk_dalloc_huge(arena, ret, size);
|
||||
@@ -132,7 +132,7 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
||||
size_t usize_next;
|
||||
extent_node_t *node;
|
||||
arena_t *arena;
|
||||
chunk_purge_t *chunk_purge;
|
||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||
bool zeroed;
|
||||
|
||||
/* Increase usize to incorporate extra. */
|
||||
@@ -145,15 +145,11 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
||||
node = huge_node_get(ptr);
|
||||
arena = extent_node_arena_get(node);
|
||||
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
chunk_purge = arena->chunk_purge;
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
|
||||
/* Fill if necessary (shrinking). */
|
||||
if (oldsize > usize) {
|
||||
size_t sdiff = oldsize - usize;
|
||||
zeroed = !chunk_purge_wrapper(arena, chunk_purge, ptr, usize,
|
||||
sdiff);
|
||||
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
|
||||
CHUNK_CEILING(usize), usize, sdiff);
|
||||
if (config_fill && unlikely(opt_junk_free)) {
|
||||
memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
|
||||
zeroed = false;
|
||||
@@ -185,26 +181,31 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
static bool
|
||||
huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
||||
{
|
||||
extent_node_t *node;
|
||||
arena_t *arena;
|
||||
chunk_purge_t *chunk_purge;
|
||||
chunk_hooks_t chunk_hooks;
|
||||
size_t cdiff;
|
||||
bool zeroed;
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = extent_node_arena_get(node);
|
||||
chunk_hooks = chunk_hooks_get(arena);
|
||||
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
chunk_purge = arena->chunk_purge;
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
/* Split excess chunks. */
|
||||
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
|
||||
if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
|
||||
CHUNK_CEILING(usize), cdiff, true, arena->ind))
|
||||
return (true);
|
||||
|
||||
if (oldsize > usize) {
|
||||
size_t sdiff = oldsize - usize;
|
||||
zeroed = !chunk_purge_wrapper(arena, chunk_purge,
|
||||
zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
|
||||
CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
|
||||
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
|
||||
CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
|
||||
usize), sdiff);
|
||||
if (config_fill && unlikely(opt_junk_free)) {
|
||||
huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
|
||||
sdiff);
|
||||
@@ -222,6 +223,8 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
||||
|
||||
/* Zap the excess chunks. */
|
||||
arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
static bool
|
||||
@@ -304,14 +307,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
return (false);
|
||||
}
|
||||
|
||||
if (!maps_coalesce)
|
||||
return (true);
|
||||
|
||||
/* Shrink the allocation in-place. */
|
||||
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
|
||||
huge_ralloc_no_move_shrink(ptr, oldsize, usize);
|
||||
return (false);
|
||||
}
|
||||
/* Attempt to shrink the allocation in-place. */
|
||||
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
|
||||
return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));
|
||||
|
||||
/* Attempt to expand the allocation in-place. */
|
||||
if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
|
||||
|
167 src/pages.c Normal file
@@ -0,0 +1,167 @@
|
||||
#define JEMALLOC_PAGES_C_
|
||||
#include "jemalloc/internal/jemalloc_internal.h"
|
||||
|
||||
/******************************************************************************/
|
||||
|
||||
void *
|
||||
pages_map(void *addr, size_t size)
|
||||
{
|
||||
void *ret;
|
||||
|
||||
assert(size != 0);
|
||||
|
||||
#ifdef _WIN32
|
||||
/*
|
||||
* If VirtualAlloc can't allocate at the given address when one is
|
||||
* given, it fails and returns NULL.
|
||||
*/
|
||||
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
|
||||
PAGE_READWRITE);
|
||||
#else
|
||||
/*
|
||||
* We don't use MAP_FIXED here, because it can cause the *replacement*
|
||||
* of existing mappings, and we only want to create new mappings.
|
||||
*/
|
||||
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
|
||||
-1, 0);
|
||||
assert(ret != NULL);
|
||||
|
||||
if (ret == MAP_FAILED)
|
||||
ret = NULL;
|
||||
else if (addr != NULL && ret != addr) {
|
||||
/*
|
||||
* We succeeded in mapping memory, but not in the right place.
|
||||
*/
|
||||
pages_unmap(ret, size);
|
||||
ret = NULL;
|
||||
}
|
||||
#endif
|
||||
assert(ret == NULL || (addr == NULL && ret != addr)
|
||||
|| (addr != NULL && ret == addr));
|
||||
return (ret);
|
||||
}
|
||||
|
||||
void
|
||||
pages_unmap(void *addr, size_t size)
|
||||
{
|
||||
|
||||
#ifdef _WIN32
|
||||
if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
|
||||
#else
|
||||
if (munmap(addr, size) == -1)
|
||||
#endif
|
||||
{
|
||||
char buf[BUFERROR_BUF];
|
||||
|
||||
buferror(get_errno(), buf, sizeof(buf));
|
||||
malloc_printf("<jemalloc>: Error in "
|
||||
#ifdef _WIN32
|
||||
"VirtualFree"
|
||||
#else
|
||||
"munmap"
|
||||
#endif
|
||||
"(): %s\n", buf);
|
||||
if (opt_abort)
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
void *
|
||||
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
|
||||
{
|
||||
void *ret = (void *)((uintptr_t)addr + leadsize);
|
||||
|
||||
assert(alloc_size >= leadsize + size);
|
||||
#ifdef _WIN32
|
||||
{
|
||||
void *new_addr;
|
||||
|
||||
pages_unmap(addr, alloc_size);
|
||||
new_addr = pages_map(ret, size);
|
||||
if (new_addr == ret)
|
||||
return (ret);
|
||||
if (new_addr)
|
||||
pages_unmap(new_addr, size);
|
||||
return (NULL);
|
||||
}
|
||||
#else
|
||||
{
|
||||
size_t trailsize = alloc_size - leadsize - size;
|
||||
|
||||
if (leadsize != 0)
|
||||
pages_unmap(addr, leadsize);
|
||||
if (trailsize != 0)
|
||||
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
|
||||
return (ret);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static bool
|
||||
pages_commit_impl(void *addr, size_t size, bool commit)
|
||||
{
|
||||
|
||||
#ifndef _WIN32
|
||||
if (config_debug) {
|
||||
int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
|
||||
void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
|
||||
MAP_FIXED, -1, 0);
|
||||
if (result == MAP_FAILED)
|
||||
return (true);
|
||||
if (result != addr) {
|
||||
/*
|
||||
* We succeeded in mapping memory, but not in the right
|
||||
* place.
|
||||
*/
|
||||
pages_unmap(result, size);
|
||||
return (true);
|
||||
}
|
||||
return (false);
|
||||
}
|
||||
#endif
|
||||
return (true);
|
||||
}
|
||||
|
||||
bool
|
||||
pages_commit(void *addr, size_t size)
|
||||
{
|
||||
|
||||
return (pages_commit_impl(addr, size, true));
|
||||
}
|
||||
|
||||
bool
|
||||
pages_decommit(void *addr, size_t size)
|
||||
{
|
||||
|
||||
return (pages_commit_impl(addr, size, false));
|
||||
}
|
||||
|
||||
bool
|
||||
pages_purge(void *addr, size_t size)
|
||||
{
|
||||
bool unzeroed;
|
||||
|
||||
#ifdef _WIN32
|
||||
VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
|
||||
unzeroed = true;
|
||||
#elif defined(JEMALLOC_HAVE_MADVISE)
|
||||
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
|
||||
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
|
||||
# define JEMALLOC_MADV_ZEROS true
|
||||
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
|
||||
# define JEMALLOC_MADV_PURGE MADV_FREE
|
||||
# define JEMALLOC_MADV_ZEROS false
|
||||
# else
|
||||
# error "No madvise(2) flag defined for purging unused dirty pages."
|
||||
# endif
|
||||
int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
|
||||
unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
|
||||
# undef JEMALLOC_MADV_PURGE
|
||||
# undef JEMALLOC_MADV_ZEROS
|
||||
#else
|
||||
/* Last resort no-op. */
|
||||
unzeroed = true;
|
||||
#endif
|
||||
return (unzeroed);
|
||||
}
|
||||
|