Integrate whole chunks into unused dirty page purging machinery.
Extend per arena unused dirty page purging to manage unused dirty chunks in addition to unused dirty runs. Rather than immediately unmapping deallocated chunks (or purging them in the --disable-munmap case), store them in a separate set of trees, chunks_[sz]ad_dirty. Preferentially allocate dirty chunks. When excessive unused dirty pages accumulate, purge runs and chunks in integrated LRU order (and unmap chunks in the --enable-munmap case).

Refactor extent_node_t to provide accessor functions.
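The heart of the change is that dirty runs and whole dirty chunks now share one circular list that is traversed in LRU order: each dirty chunk's extent node embeds a sentinel element that is spliced into the per-arena runs_dirty ring, while a parallel chunks_dirty ring is walked in lockstep so the traversal can tell whole chunks from runs. The following is a minimal, self-contained sketch of that idea only; ring_t, dirty_run_t, dirty_chunk_t, and CONTAINER_OF are simplified stand-ins, not jemalloc's actual types or macros.

/*
 * Sketch: one integrated LRU ring of dirty runs and dirty chunks.  Each chunk
 * contributes a sentinel ("runs_dirty") spliced into the run ring; a parallel
 * chunk ring is advanced in lockstep to recognize those sentinels.
 */
#include <stddef.h>
#include <stdio.h>

#define CONTAINER_OF(ptr, type, field) \
    ((type *)((char *)(ptr) - offsetof(type, field)))

typedef struct ring_s {
	struct ring_s *prev, *next;
} ring_t;

static void
ring_new(ring_t *r)
{
	r->prev = r->next = r;
}

static void
ring_insert_tail(ring_t *head, ring_t *elm)
{
	elm->prev = head->prev;
	elm->next = head;
	head->prev->next = elm;
	head->prev = elm;
}

typedef struct {
	ring_t rd_link;		/* Link in the runs_dirty ring. */
	size_t npages;
} dirty_run_t;

typedef struct {
	ring_t cd_link;		/* Link in the chunks_dirty ring. */
	ring_t runs_dirty;	/* Sentinel spliced into the runs_dirty ring. */
	size_t npages;
} dirty_chunk_t;

int
main(void)
{
	ring_t runs_dirty, chunks_dirty;	/* Per-arena ring heads. */
	dirty_run_t r0 = {.npages = 4}, r1 = {.npages = 2};
	dirty_chunk_t c0 = {.npages = 256};
	ring_t *runselm, *chunkselm;

	ring_new(&runs_dirty);
	ring_new(&chunks_dirty);

	/* LRU insertion order: run r0, whole chunk c0, run r1. */
	ring_insert_tail(&runs_dirty, &r0.rd_link);
	ring_insert_tail(&chunks_dirty, &c0.cd_link);
	ring_insert_tail(&runs_dirty, &c0.runs_dirty);
	ring_insert_tail(&runs_dirty, &r1.rd_link);

	/* A single traversal visits runs and chunks in integrated LRU order. */
	chunkselm = chunks_dirty.next;
	for (runselm = runs_dirty.next; runselm != &runs_dirty;
	    runselm = runselm->next) {
		if (chunkselm != &chunks_dirty && runselm ==
		    &CONTAINER_OF(chunkselm, dirty_chunk_t, cd_link)->runs_dirty) {
			dirty_chunk_t *chunk = CONTAINER_OF(chunkselm,
			    dirty_chunk_t, cd_link);
			printf("purge whole chunk: %zu pages\n", chunk->npages);
			chunkselm = chunkselm->next;
		} else {
			dirty_run_t *run = CONTAINER_OF(runselm, dirty_run_t,
			    rd_link);
			printf("purge run: %zu pages\n", run->npages);
		}
	}
	return (0);
}

In the patch itself the same lockstep traversal appears in arena_dirty_count(), arena_stash_dirty(), and arena_purge_stashed() below.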
src/arena.c (399 lines changed)
@@ -112,34 +112,94 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
|
||||
}
|
||||
|
||||
static void
|
||||
arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
|
||||
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
|
||||
size_t npages)
|
||||
{
|
||||
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
|
||||
|
||||
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
|
||||
LG_PAGE));
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
|
||||
CHUNK_MAP_DIRTY);
|
||||
ql_elm_new(miscelm, dr_link);
|
||||
ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
|
||||
|
||||
qr_new(miscelm, rd_link);
|
||||
qr_meld(&arena->runs_dirty, miscelm, rd_link);
|
||||
arena->ndirty += npages;
|
||||
}
|
||||
|
||||
static void
|
||||
arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
|
||||
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
|
||||
size_t npages)
|
||||
{
|
||||
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
|
||||
|
||||
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
|
||||
LG_PAGE));
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
|
||||
CHUNK_MAP_DIRTY);
|
||||
ql_remove(&arena->runs_dirty, miscelm, dr_link);
|
||||
|
||||
qr_remove(miscelm, rd_link);
|
||||
assert(arena->ndirty >= npages);
|
||||
arena->ndirty -= npages;
|
||||
}
|
||||
|
||||
static size_t
|
||||
arena_chunk_dirty_npages(const extent_node_t *node)
|
||||
{
|
||||
|
||||
return (extent_node_size_get(node) >> LG_PAGE);
|
||||
}
|
||||
|
||||
static void
|
||||
arena_chunk_dirty_node_init(extent_node_t *node)
|
||||
{
|
||||
|
||||
qr_new(node, cd_link);
|
||||
qr_new(&node->runs_dirty, rd_link);
|
||||
}
|
||||
|
||||
static void
|
||||
arena_chunk_dirty_insert(arena_chunk_map_misc_t *runs_dirty,
|
||||
extent_node_t *chunks_dirty, extent_node_t *node)
|
||||
{
|
||||
|
||||
qr_meld(chunks_dirty, node, cd_link);
|
||||
qr_meld(runs_dirty, &node->runs_dirty, rd_link);
|
||||
}
|
||||
|
||||
static void
|
||||
arena_chunk_dirty_remove(extent_node_t *node)
|
||||
{
|
||||
|
||||
qr_remove(node, cd_link);
|
||||
qr_remove(&node->runs_dirty, rd_link);
|
||||
}
|
||||
|
||||
void
|
||||
arena_chunk_dirty_maybe_insert(arena_t *arena, extent_node_t *node, bool dirty)
|
||||
{
|
||||
|
||||
arena_chunk_dirty_node_init(node);
|
||||
if (dirty) {
|
||||
arena_chunk_dirty_insert(&arena->runs_dirty,
|
||||
&arena->chunks_dirty, node);
|
||||
arena->ndirty += arena_chunk_dirty_npages(node);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
arena_chunk_dirty_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
|
||||
{
|
||||
|
||||
if (dirty) {
|
||||
arena_chunk_dirty_remove(node);
|
||||
assert(arena->ndirty >= arena_chunk_dirty_npages(node));
|
||||
arena->ndirty -= arena_chunk_dirty_npages(node);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE_C void *
|
||||
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
|
||||
{
|
||||
@@ -243,7 +303,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
|
||||
|
||||
arena_avail_remove(arena, chunk, run_ind, total_pages);
|
||||
if (flag_dirty != 0)
|
||||
arena_dirty_remove(arena, chunk, run_ind, total_pages);
|
||||
arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
|
||||
arena_cactive_update(arena, need_pages, 0);
|
||||
arena->nactive += need_pages;
|
||||
|
||||
@@ -256,7 +316,7 @@ arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
|
||||
arena_mapbits_unallocated_set(chunk,
|
||||
run_ind+total_pages-1, (rem_pages << LG_PAGE),
|
||||
flag_dirty);
|
||||
arena_dirty_insert(arena, chunk, run_ind+need_pages,
|
||||
arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
|
||||
rem_pages);
|
||||
} else {
|
||||
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
|
||||
@@ -405,9 +465,10 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero)
|
||||
chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
|
||||
arena->ind, NULL, chunksize, chunksize, zero);
|
||||
if (chunk != NULL) {
|
||||
chunk->node.arena = arena;
|
||||
chunk->node.addr = chunk;
|
||||
chunk->node.size = 0; /* Indicates this is an arena chunk. */
|
||||
extent_node_arena_set(&chunk->node, arena);
|
||||
extent_node_addr_set(&chunk->node, chunk);
|
||||
extent_node_size_set(&chunk->node, chunksize);
|
||||
extent_node_achunk_set(&chunk->node, true);
|
||||
if (chunk_register(chunk, &chunk->node)) {
|
||||
chunk_dalloc((void *)chunk, chunksize, arena->ind);
|
||||
chunk = NULL;
|
||||
@@ -516,7 +577,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
|
||||
|
||||
arena->spare = chunk;
|
||||
if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
|
||||
arena_dirty_remove(arena, spare, map_bias,
|
||||
arena_run_dirty_remove(arena, spare, map_bias,
|
||||
chunk_npages-map_bias);
|
||||
}
|
||||
chunk_dalloc = arena->chunk_dalloc;
|
||||
@@ -899,18 +960,29 @@ static size_t
|
||||
arena_dirty_count(arena_t *arena)
|
||||
{
|
||||
size_t ndirty = 0;
|
||||
arena_chunk_map_misc_t *miscelm;
|
||||
arena_chunk_t *chunk;
|
||||
size_t pageind, npages;
|
||||
arena_chunk_map_misc_t *runselm;
|
||||
extent_node_t *chunkselm;
|
||||
|
||||
ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
|
||||
pageind = arena_miscelm_to_pageind(miscelm);
|
||||
assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
|
||||
assert(arena_mapbits_large_get(chunk, pageind) == 0);
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
|
||||
npages = arena_mapbits_unallocated_size_get(chunk, pageind) >>
|
||||
LG_PAGE;
|
||||
for (runselm = qr_next(&arena->runs_dirty, rd_link),
|
||||
chunkselm = qr_next(&arena->chunks_dirty, cd_link);
|
||||
runselm != &arena->runs_dirty; runselm = qr_next(runselm,
|
||||
rd_link)) {
|
||||
size_t npages;
|
||||
|
||||
if (runselm == &chunkselm->runs_dirty) {
|
||||
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
|
||||
chunkselm = qr_next(chunkselm, cd_link);
|
||||
} else {
|
||||
arena_chunk_t *chunk = (arena_chunk_t
|
||||
*)CHUNK_ADDR2BASE(runselm);
|
||||
size_t pageind = arena_miscelm_to_pageind(runselm);
|
||||
assert(arena_mapbits_allocated_get(chunk, pageind) ==
|
||||
0);
|
||||
assert(arena_mapbits_large_get(chunk, pageind) == 0);
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
|
||||
npages = arena_mapbits_unallocated_size_get(chunk,
|
||||
pageind) >> LG_PAGE;
|
||||
}
|
||||
ndirty += npages;
|
||||
}
|
||||
|
||||
@@ -939,41 +1011,94 @@ arena_compute_npurge(arena_t *arena, bool all)
|
||||
|
||||
static size_t
|
||||
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
|
||||
arena_chunk_miscelms_t *miscelms)
|
||||
arena_chunk_map_misc_t *purge_runs_sentinel,
|
||||
extent_node_t *purge_chunks_sentinel)
|
||||
{
|
||||
arena_chunk_map_misc_t *miscelm;
|
||||
arena_chunk_map_misc_t *runselm, *runselm_next;
|
||||
extent_node_t *chunkselm;
|
||||
size_t nstashed = 0;
|
||||
|
||||
/* Add at least npurge pages to purge_list. */
|
||||
for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
|
||||
miscelm = ql_first(&arena->runs_dirty)) {
|
||||
arena_chunk_t *chunk =
|
||||
(arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
|
||||
size_t pageind = arena_miscelm_to_pageind(miscelm);
|
||||
size_t run_size = arena_mapbits_unallocated_size_get(chunk,
|
||||
pageind);
|
||||
size_t npages = run_size >> LG_PAGE;
|
||||
arena_run_t *run = &miscelm->run;
|
||||
/* Stash at least npurge pages. */
|
||||
for (runselm = qr_next(&arena->runs_dirty, rd_link),
|
||||
chunkselm = qr_next(&arena->chunks_dirty, cd_link);
|
||||
runselm != &arena->runs_dirty; runselm = runselm_next) {
|
||||
size_t npages;
|
||||
runselm_next = qr_next(runselm, rd_link);
|
||||
|
||||
assert(pageind + npages <= chunk_npages);
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind) ==
|
||||
arena_mapbits_dirty_get(chunk, pageind+npages-1));
|
||||
if (runselm == &chunkselm->runs_dirty) {
|
||||
extent_node_t *chunkselm_next, *tnode;
|
||||
void *addr;
|
||||
size_t size;
|
||||
bool zeroed, zero;
|
||||
UNUSED void *chunk;
|
||||
|
||||
/*
|
||||
* If purging the spare chunk's run, make it available prior to
|
||||
* allocation.
|
||||
*/
|
||||
if (chunk == arena->spare)
|
||||
arena_chunk_alloc(arena);
|
||||
chunkselm_next = qr_next(chunkselm, cd_link);
|
||||
/*
|
||||
* Cache contents of chunkselm prior to it being
|
||||
* destroyed as a side effect of allocating the chunk.
|
||||
*/
|
||||
addr = extent_node_addr_get(chunkselm);
|
||||
size = extent_node_size_get(chunkselm);
|
||||
zeroed = extent_node_zeroed_get(chunkselm);
|
||||
/* Allocate. */
|
||||
zero = false;
|
||||
chunk = arena->chunk_alloc(addr, size, chunksize, &zero,
|
||||
arena->ind);
|
||||
assert(chunk == addr);
|
||||
/*
|
||||
* Create a temporary node to link into the ring of
|
||||
* stashed allocations.
|
||||
*/
|
||||
tnode = arena_node_alloc(arena);
|
||||
/*
|
||||
* OOM shouldn't be possible because chunk allocation
|
||||
* just cached a node.
|
||||
*/
|
||||
assert(tnode != NULL);
|
||||
extent_node_arena_set(tnode, arena);
|
||||
extent_node_addr_set(tnode, addr);
|
||||
extent_node_size_set(tnode, size);
|
||||
extent_node_zeroed_set(tnode, zeroed);
|
||||
arena_chunk_dirty_node_init(tnode);
|
||||
/* Stash. */
|
||||
arena_chunk_dirty_insert(purge_runs_sentinel,
|
||||
purge_chunks_sentinel, tnode);
|
||||
npages = size >> LG_PAGE;
|
||||
chunkselm = chunkselm_next;
|
||||
} else {
|
||||
arena_chunk_t *chunk =
|
||||
(arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
|
||||
size_t pageind = arena_miscelm_to_pageind(runselm);
|
||||
arena_run_t *run = &runselm->run;
|
||||
size_t run_size =
|
||||
arena_mapbits_unallocated_size_get(chunk, pageind);
|
||||
|
||||
/* Temporarily allocate the free dirty run. */
|
||||
arena_run_split_large(arena, run, run_size, false);
|
||||
/* Append to purge_list for later processing. */
|
||||
ql_elm_new(miscelm, dr_link);
|
||||
ql_tail_insert(miscelms, miscelm, dr_link);
|
||||
npages = run_size >> LG_PAGE;
|
||||
|
||||
assert(pageind + npages <= chunk_npages);
|
||||
assert(arena_mapbits_dirty_get(chunk, pageind) ==
|
||||
arena_mapbits_dirty_get(chunk, pageind+npages-1));
|
||||
|
||||
/*
|
||||
* If purging the spare chunk's run, make it available
|
||||
* prior to allocation.
|
||||
*/
|
||||
if (chunk == arena->spare)
|
||||
arena_chunk_alloc(arena);
|
||||
|
||||
/* Temporarily allocate the free dirty run. */
|
||||
arena_run_split_large(arena, run, run_size, false);
|
||||
/* Append to purge_runs for later processing. */
|
||||
if (false)
|
||||
qr_new(runselm, rd_link); /* Redundant. */
|
||||
else {
|
||||
assert(qr_next(runselm, rd_link) == runselm);
|
||||
assert(qr_prev(runselm, rd_link) == runselm);
|
||||
}
|
||||
qr_meld(purge_runs_sentinel, runselm, rd_link);
|
||||
}
|
||||
|
||||
nstashed += npages;
|
||||
|
||||
if (!all && nstashed >= npurge)
|
||||
break;
|
||||
}
|
||||
@@ -982,52 +1107,66 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
|
||||
}
|
||||
|
||||
static size_t
|
||||
arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
|
||||
arena_purge_stashed(arena_t *arena, arena_chunk_map_misc_t *purge_runs_sentinel,
|
||||
extent_node_t *purge_chunks_sentinel)
|
||||
{
|
||||
size_t npurged, nmadvise;
|
||||
arena_chunk_map_misc_t *miscelm;
|
||||
arena_chunk_map_misc_t *runselm;
|
||||
extent_node_t *chunkselm;
|
||||
|
||||
if (config_stats)
|
||||
nmadvise = 0;
|
||||
npurged = 0;
|
||||
|
||||
malloc_mutex_unlock(&arena->lock);
|
||||
for (runselm = qr_next(purge_runs_sentinel, rd_link),
|
||||
chunkselm = qr_next(purge_chunks_sentinel, cd_link);
|
||||
runselm != purge_runs_sentinel; runselm = qr_next(runselm,
|
||||
rd_link)) {
|
||||
size_t npages;
|
||||
|
||||
ql_foreach(miscelm, miscelms, dr_link) {
|
||||
arena_chunk_t *chunk;
|
||||
size_t pageind, run_size, npages, flag_unzeroed, i;
|
||||
bool unzeroed;
|
||||
if (runselm == &chunkselm->runs_dirty) {
|
||||
size_t size = extent_node_size_get(chunkselm);
|
||||
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
|
||||
pageind = arena_miscelm_to_pageind(miscelm);
|
||||
run_size = arena_mapbits_large_size_get(chunk, pageind);
|
||||
npages = run_size >> LG_PAGE;
|
||||
pages_purge(extent_node_addr_get(chunkselm), size);
|
||||
npages = size >> LG_PAGE;
|
||||
chunkselm = qr_next(chunkselm, cd_link);
|
||||
} else {
|
||||
arena_chunk_t *chunk;
|
||||
size_t pageind, run_size, flag_unzeroed, i;
|
||||
bool unzeroed;
|
||||
|
||||
assert(pageind + npages <= chunk_npages);
|
||||
unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
|
||||
LG_PAGE)), run_size);
|
||||
flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(runselm);
|
||||
pageind = arena_miscelm_to_pageind(runselm);
|
||||
run_size = arena_mapbits_large_size_get(chunk, pageind);
|
||||
npages = run_size >> LG_PAGE;
|
||||
|
||||
/*
|
||||
* Set the unzeroed flag for all pages, now that pages_purge()
|
||||
* has returned whether the pages were zeroed as a side effect
|
||||
* of purging. This chunk map modification is safe even though
|
||||
* the arena mutex isn't currently owned by this thread,
|
||||
* because the run is marked as allocated, thus protecting it
|
||||
* from being modified by any other thread. As long as these
|
||||
* writes don't perturb the first and last elements'
|
||||
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
|
||||
*/
|
||||
for (i = 0; i < npages; i++) {
|
||||
arena_mapbits_unzeroed_set(chunk, pageind+i,
|
||||
flag_unzeroed);
|
||||
assert(pageind + npages <= chunk_npages);
|
||||
unzeroed = pages_purge((void *)((uintptr_t)chunk +
|
||||
(pageind << LG_PAGE)), run_size);
|
||||
flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
|
||||
|
||||
/*
|
||||
* Set the unzeroed flag for all pages, now that
|
||||
* pages_purge() has returned whether the pages were
|
||||
* zeroed as a side effect of purging. This chunk map
|
||||
* modification is safe even though the arena mutex
|
||||
* isn't currently owned by this thread, because the run
|
||||
* is marked as allocated, thus protecting it from being
|
||||
* modified by any other thread. As long as these
|
||||
* writes don't perturb the first and last elements'
|
||||
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
|
||||
*/
|
||||
for (i = 0; i < npages; i++) {
|
||||
arena_mapbits_unzeroed_set(chunk, pageind+i,
|
||||
flag_unzeroed);
|
||||
}
|
||||
}
|
||||
|
||||
npurged += npages;
|
||||
if (config_stats)
|
||||
nmadvise++;
|
||||
}
|
||||
|
||||
malloc_mutex_lock(&arena->lock);
|
||||
|
||||
if (config_stats) {
|
||||
@@ -1039,16 +1178,31 @@ arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
|
||||
}
|
||||
|
||||
static void
|
||||
arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
|
||||
arena_unstash_purged(arena_t *arena,
|
||||
arena_chunk_map_misc_t *purge_runs_sentinel,
|
||||
extent_node_t *purge_chunks_sentinel)
|
||||
{
|
||||
arena_chunk_map_misc_t *miscelm;
|
||||
arena_chunk_map_misc_t *runselm, *runselm_next;
|
||||
extent_node_t *chunkselm;
|
||||
|
||||
/* Deallocate runs. */
|
||||
for (miscelm = ql_first(miscelms); miscelm != NULL;
|
||||
miscelm = ql_first(miscelms)) {
|
||||
arena_run_t *run = &miscelm->run;
|
||||
ql_remove(miscelms, miscelm, dr_link);
|
||||
arena_run_dalloc(arena, run, false, true);
|
||||
for (runselm = qr_next(purge_runs_sentinel, rd_link),
|
||||
chunkselm = qr_next(purge_chunks_sentinel, cd_link);
|
||||
runselm != purge_runs_sentinel; runselm = runselm_next) {
|
||||
runselm_next = qr_next(runselm, rd_link);
|
||||
if (runselm == &chunkselm->runs_dirty) {
|
||||
extent_node_t *chunkselm_next = qr_next(chunkselm,
|
||||
cd_link);
|
||||
arena_chunk_dirty_remove(chunkselm);
|
||||
chunk_unmap(arena, extent_node_addr_get(chunkselm),
|
||||
extent_node_size_get(chunkselm));
|
||||
arena_node_dalloc(arena, chunkselm);
|
||||
chunkselm = chunkselm_next;
|
||||
} else {
|
||||
arena_run_t *run = &runselm->run;
|
||||
qr_remove(runselm, rd_link);
|
||||
arena_run_dalloc(arena, run, false, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1056,7 +1210,8 @@ void
|
||||
arena_purge(arena_t *arena, bool all)
|
||||
{
|
||||
size_t npurge, npurgeable, npurged;
|
||||
arena_chunk_miscelms_t purge_list;
|
||||
arena_chunk_map_misc_t purge_runs_sentinel;
|
||||
extent_node_t purge_chunks_sentinel;
|
||||
|
||||
/*
|
||||
* Calls to arena_dirty_count() are disabled even for debug builds
|
||||
@@ -1072,12 +1227,17 @@ arena_purge(arena_t *arena, bool all)
|
||||
arena->stats.npurge++;
|
||||
|
||||
npurge = arena_compute_npurge(arena, all);
|
||||
ql_new(&purge_list);
|
||||
npurgeable = arena_stash_dirty(arena, all, npurge, &purge_list);
|
||||
qr_new(&purge_runs_sentinel, rd_link);
|
||||
arena_chunk_dirty_node_init(&purge_chunks_sentinel);
|
||||
|
||||
npurgeable = arena_stash_dirty(arena, all, npurge, &purge_runs_sentinel,
|
||||
&purge_chunks_sentinel);
|
||||
assert(npurgeable >= npurge);
|
||||
npurged = arena_purge_stashed(arena, &purge_list);
|
||||
npurged = arena_purge_stashed(arena, &purge_runs_sentinel,
|
||||
&purge_chunks_sentinel);
|
||||
assert(npurged == npurgeable);
|
||||
arena_unstash_purged(arena, &purge_list);
|
||||
arena_unstash_purged(arena, &purge_runs_sentinel,
|
||||
&purge_chunks_sentinel);
|
||||
}
|
||||
|
||||
void
|
||||
@@ -1115,9 +1275,12 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
|
||||
run_ind+run_pages+nrun_pages-1) == flag_dirty);
|
||||
arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
|
||||
|
||||
/* If the successor is dirty, remove it from runs_dirty. */
|
||||
/*
|
||||
* If the successor is dirty, remove it from the set of dirty
|
||||
* pages.
|
||||
*/
|
||||
if (flag_dirty != 0) {
|
||||
arena_dirty_remove(arena, chunk, run_ind+run_pages,
|
||||
arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
|
||||
nrun_pages);
|
||||
}
|
||||
|
||||
@@ -1148,9 +1311,14 @@ arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
|
||||
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
|
||||
arena_avail_remove(arena, chunk, run_ind, prun_pages);
|
||||
|
||||
/* If the predecessor is dirty, remove it from runs_dirty. */
|
||||
if (flag_dirty != 0)
|
||||
arena_dirty_remove(arena, chunk, run_ind, prun_pages);
|
||||
/*
|
||||
* If the predecessor is dirty, remove it from the set of dirty
|
||||
* pages.
|
||||
*/
|
||||
if (flag_dirty != 0) {
|
||||
arena_run_dirty_remove(arena, chunk, run_ind,
|
||||
prun_pages);
|
||||
}
|
||||
|
||||
size += prun_size;
|
||||
run_pages += prun_pages;
|
||||
@@ -1224,7 +1392,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
|
||||
arena_avail_insert(arena, chunk, run_ind, run_pages);
|
||||
|
||||
if (dirty)
|
||||
arena_dirty_insert(arena, chunk, run_ind, run_pages);
|
||||
arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
|
||||
|
||||
/* Deallocate chunk if it is now completely unused. */
|
||||
if (size == arena_maxrun) {
|
||||
@@ -1843,7 +2011,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
|
||||
if (run == bin->runcur)
|
||||
bin->runcur = NULL;
|
||||
else {
|
||||
index_t binind = arena_bin_index(chunk->node.arena, bin);
|
||||
index_t binind = arena_bin_index(extent_node_arena_get(
|
||||
&chunk->node), bin);
|
||||
arena_bin_info_t *bin_info = &arena_bin_info[binind];
|
||||
|
||||
if (bin_info->nregs != 1) {
|
||||
@@ -2184,7 +2353,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
|
||||
arena_t *arena;
|
||||
|
||||
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
|
||||
arena = chunk->node.arena;
|
||||
arena = extent_node_arena_get(&chunk->node);
|
||||
|
||||
if (usize < oldsize) {
|
||||
/* Fill before shrinking in order avoid a race. */
|
||||
@@ -2422,20 +2591,6 @@ arena_new(unsigned ind)
|
||||
arena->nthreads = 0;
|
||||
if (malloc_mutex_init(&arena->lock))
|
||||
return (NULL);
|
||||
arena->chunk_alloc = chunk_alloc_default;
|
||||
arena->chunk_dalloc = chunk_dalloc_default;
|
||||
ql_new(&arena->huge);
|
||||
if (malloc_mutex_init(&arena->huge_mtx))
|
||||
return (NULL);
|
||||
extent_tree_szad_new(&arena->chunks_szad_mmap);
|
||||
extent_tree_ad_new(&arena->chunks_ad_mmap);
|
||||
extent_tree_szad_new(&arena->chunks_szad_dss);
|
||||
extent_tree_ad_new(&arena->chunks_ad_dss);
|
||||
ql_new(&arena->node_cache);
|
||||
if (malloc_mutex_init(&arena->chunks_mtx))
|
||||
return (NULL);
|
||||
if (malloc_mutex_init(&arena->node_cache_mtx))
|
||||
return (NULL);
|
||||
|
||||
if (config_stats) {
|
||||
memset(&arena->stats, 0, sizeof(arena_stats_t));
|
||||
@@ -2463,7 +2618,27 @@ arena_new(unsigned ind)
|
||||
arena->ndirty = 0;
|
||||
|
||||
arena_avail_tree_new(&arena->runs_avail);
|
||||
ql_new(&arena->runs_dirty);
|
||||
qr_new(&arena->runs_dirty, rd_link);
|
||||
qr_new(&arena->chunks_dirty, cd_link);
|
||||
|
||||
ql_new(&arena->huge);
|
||||
if (malloc_mutex_init(&arena->huge_mtx))
|
||||
return (NULL);
|
||||
|
||||
extent_tree_szad_new(&arena->chunks_szad_dirty);
|
||||
extent_tree_ad_new(&arena->chunks_ad_dirty);
|
||||
extent_tree_szad_new(&arena->chunks_szad_mmap);
|
||||
extent_tree_ad_new(&arena->chunks_ad_mmap);
|
||||
extent_tree_szad_new(&arena->chunks_szad_dss);
|
||||
extent_tree_ad_new(&arena->chunks_ad_dss);
|
||||
if (malloc_mutex_init(&arena->chunks_mtx))
|
||||
return (NULL);
|
||||
ql_new(&arena->node_cache);
|
||||
if (malloc_mutex_init(&arena->node_cache_mtx))
|
||||
return (NULL);
|
||||
|
||||
arena->chunk_alloc = chunk_alloc_default;
|
||||
arena->chunk_dalloc = chunk_dalloc_default;
|
||||
|
||||
/* Initialize bins. */
|
||||
for (i = 0; i < NBINS; i++) {
src/base.c (16 lines changed)
@@ -60,8 +60,8 @@ base_chunk_alloc(size_t minsize)
 		if (config_stats)
 			base_allocated += nsize;
 	}
-	node->addr = addr;
-	node->size = csize;
+	extent_node_addr_set(node, addr);
+	extent_node_size_set(node, csize);
 	return (node);
 }

@@ -84,8 +84,8 @@ base_alloc(size_t size)
 	 */
 	csize = CACHELINE_CEILING(size);

-	key.addr = NULL;
-	key.size = csize;
+	extent_node_addr_set(&key, NULL);
+	extent_node_size_set(&key, csize);
 	malloc_mutex_lock(&base_mtx);
 	node = extent_tree_szad_nsearch(&base_avail_szad, &key);
 	if (node != NULL) {
@@ -100,10 +100,10 @@ base_alloc(size_t size)
 		goto label_return;
 	}

-	ret = node->addr;
-	if (node->size > csize) {
-		node->addr = (void *)((uintptr_t)ret + csize);
-		node->size -= csize;
+	ret = extent_node_addr_get(node);
+	if (extent_node_size_get(node) > csize) {
+		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+		extent_node_size_set(node, extent_node_size_get(node) - csize);
 		extent_tree_szad_insert(&base_avail_szad, node);
 	} else
 		base_node_dalloc(node);
src/chunk.c (146 lines changed)
@@ -24,12 +24,13 @@ bool
|
||||
chunk_register(const void *chunk, const extent_node_t *node)
|
||||
{
|
||||
|
||||
assert(node->addr == chunk);
|
||||
assert(extent_node_addr_get(node) == chunk);
|
||||
|
||||
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
|
||||
return (true);
|
||||
if (config_prof && opt_prof) {
|
||||
size_t nadd = (node->size == 0) ? 1 : node->size / chunksize;
|
||||
size_t size = extent_node_size_get(node);
|
||||
size_t nadd = (size == 0) ? 1 : size / chunksize;
|
||||
size_t cur = atomic_add_z(&curchunks, nadd);
|
||||
size_t high = atomic_read_z(&highchunks);
|
||||
while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
|
||||
@@ -54,7 +55,8 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
|
||||
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
|
||||
assert(!err);
|
||||
if (config_prof && opt_prof) {
|
||||
size_t nsub = (node->size == 0) ? 1 : node->size / chunksize;
|
||||
size_t size = extent_node_size_get(node);
|
||||
size_t nsub = (size == 0) ? 1 : size / chunksize;
|
||||
assert(atomic_read_z(&curchunks) >= nsub);
|
||||
atomic_sub_z(&curchunks, nsub);
|
||||
}
|
||||
@@ -62,8 +64,8 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
|
||||
|
||||
static void *
|
||||
chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, void *new_addr, size_t size, size_t alignment,
|
||||
bool *zero)
|
||||
extent_tree_t *chunks_ad, bool dirty, void *new_addr, size_t size,
|
||||
size_t alignment, bool *zero)
|
||||
{
|
||||
void *ret;
|
||||
extent_node_t *node;
|
||||
@@ -77,32 +79,35 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
/* Beware size_t wrap-around. */
|
||||
if (alloc_size < size)
|
||||
return (NULL);
|
||||
key.addr = new_addr;
|
||||
key.size = alloc_size;
|
||||
extent_node_addr_set(&key, new_addr);
|
||||
extent_node_size_set(&key, alloc_size);
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
node = (new_addr != NULL) ? extent_tree_ad_search(chunks_ad, &key) :
|
||||
extent_tree_szad_nsearch(chunks_szad, &key);
|
||||
if (node == NULL || (new_addr != NULL && node->size < size)) {
|
||||
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
|
||||
size)) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
return (NULL);
|
||||
}
|
||||
leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
|
||||
(uintptr_t)node->addr;
|
||||
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
|
||||
alignment) - (uintptr_t)extent_node_addr_get(node);
|
||||
assert(new_addr == NULL || leadsize == 0);
|
||||
assert(node->size >= leadsize + size);
|
||||
trailsize = node->size - leadsize - size;
|
||||
ret = (void *)((uintptr_t)node->addr + leadsize);
|
||||
zeroed = node->zeroed;
|
||||
assert(extent_node_size_get(node) >= leadsize + size);
|
||||
trailsize = extent_node_size_get(node) - leadsize - size;
|
||||
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
|
||||
zeroed = extent_node_zeroed_get(node);
|
||||
if (zeroed)
|
||||
*zero = true;
|
||||
/* Remove node from the tree. */
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
extent_tree_ad_remove(chunks_ad, node);
|
||||
arena_chunk_dirty_maybe_remove(arena, node, dirty);
|
||||
if (leadsize != 0) {
|
||||
/* Insert the leading space as a smaller chunk. */
|
||||
node->size = leadsize;
|
||||
extent_node_size_set(node, leadsize);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||
node = NULL;
|
||||
}
|
||||
if (trailsize != 0) {
|
||||
@@ -111,15 +116,17 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
node = arena_node_alloc(arena);
|
||||
if (node == NULL) {
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
chunk_unmap(arena, ret, size);
|
||||
chunk_record(arena, chunks_szad, chunks_ad,
|
||||
dirty, ret, size);
|
||||
return (NULL);
|
||||
}
|
||||
}
|
||||
node->addr = (void *)((uintptr_t)(ret) + size);
|
||||
node->size = trailsize;
|
||||
node->zeroed = zeroed;
|
||||
extent_node_addr_set(node, (void *)((uintptr_t)(ret) + size));
|
||||
extent_node_size_set(node, trailsize);
|
||||
extent_node_zeroed_set(node, zeroed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||
node = NULL;
|
||||
}
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
@@ -148,7 +155,8 @@ chunk_alloc_core_dss(arena_t *arena, void *new_addr, size_t size,
|
||||
void *ret;
|
||||
|
||||
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dss,
|
||||
&arena->chunks_ad_dss, new_addr, size, alignment, zero)) != NULL)
|
||||
&arena->chunks_ad_dss, false, new_addr, size, alignment, zero)) !=
|
||||
NULL)
|
||||
return (ret);
|
||||
ret = chunk_alloc_dss(arena, new_addr, size, alignment, zero);
|
||||
return (ret);
|
||||
@@ -171,6 +179,11 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
assert(alignment != 0);
|
||||
assert((alignment & chunksize_mask) == 0);
|
||||
|
||||
/* dirty. */
|
||||
if ((ret = chunk_recycle(arena, &arena->chunks_szad_dirty,
|
||||
&arena->chunks_ad_dirty, true, new_addr, size, alignment, zero)) !=
|
||||
NULL)
|
||||
return (ret);
|
||||
/* "primary" dss. */
|
||||
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
||||
chunk_alloc_core_dss(arena, new_addr, size, alignment, zero)) !=
|
||||
@@ -178,8 +191,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
|
||||
return (ret);
|
||||
/* mmap. */
|
||||
if (!config_munmap && (ret = chunk_recycle(arena,
|
||||
&arena->chunks_szad_mmap, &arena->chunks_ad_mmap, new_addr, size,
|
||||
alignment, zero)) != NULL)
|
||||
&arena->chunks_szad_mmap, &arena->chunks_ad_mmap, false, new_addr,
|
||||
size, alignment, zero)) != NULL)
|
||||
return (ret);
|
||||
/*
|
||||
* Requesting an address is not implemented for chunk_alloc_mmap(), so
|
||||
@@ -263,54 +276,62 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
arena->dss_prec));
|
||||
}
|
||||
|
||||
static void
|
||||
void
|
||||
chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
extent_tree_t *chunks_ad, void *chunk, size_t size)
|
||||
extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size)
|
||||
{
|
||||
bool unzeroed;
|
||||
extent_node_t *node, *prev, key;
|
||||
extent_node_t *node, *prev;
|
||||
extent_node_t key;
|
||||
|
||||
unzeroed = pages_purge(chunk, size);
|
||||
unzeroed = dirty ? true : pages_purge(chunk, size);
|
||||
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
|
||||
|
||||
malloc_mutex_lock(&arena->chunks_mtx);
|
||||
key.addr = (void *)((uintptr_t)chunk + size);
|
||||
extent_node_addr_set(&key, (void *)((uintptr_t)chunk + size));
|
||||
node = extent_tree_ad_nsearch(chunks_ad, &key);
|
||||
/* Try to coalesce forward. */
|
||||
if (node != NULL && node->addr == key.addr) {
|
||||
if (node != NULL && extent_node_addr_get(node) ==
|
||||
extent_node_addr_get(&key)) {
|
||||
/*
|
||||
* Coalesce chunk with the following address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
* remove/insert from/into chunks_szad.
|
||||
*/
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
node->addr = chunk;
|
||||
node->size += size;
|
||||
node->zeroed = (node->zeroed && !unzeroed);
|
||||
arena_chunk_dirty_maybe_remove(arena, node, dirty);
|
||||
extent_node_addr_set(node, chunk);
|
||||
extent_node_size_set(node, extent_node_size_get(node) + size);
|
||||
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
|
||||
!unzeroed);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||
} else {
|
||||
/* Coalescing forward failed, so insert a new node. */
|
||||
node = arena_node_alloc(arena);
|
||||
if (node == NULL) {
|
||||
/*
|
||||
* Node allocation failed, which is an exceedingly
|
||||
* unlikely failure. Leak chunk; its pages have
|
||||
* already been purged, so this is only a virtual
|
||||
* memory leak.
|
||||
* unlikely failure. Leak chunk after making sure its
|
||||
* pages have already been purged, so that this is only
|
||||
* a virtual memory leak.
|
||||
*/
|
||||
if (dirty)
|
||||
pages_purge(chunk, size);
|
||||
goto label_return;
|
||||
}
|
||||
node->addr = chunk;
|
||||
node->size = size;
|
||||
node->zeroed = !unzeroed;
|
||||
extent_node_addr_set(node, chunk);
|
||||
extent_node_size_set(node, size);
|
||||
extent_node_zeroed_set(node, !unzeroed);
|
||||
extent_tree_ad_insert(chunks_ad, node);
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||
}
|
||||
|
||||
/* Try to coalesce backward. */
|
||||
prev = extent_tree_ad_prev(chunks_ad, node);
|
||||
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
|
||||
chunk) {
|
||||
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
|
||||
extent_node_size_get(prev)) == chunk) {
|
||||
/*
|
||||
* Coalesce chunk with the previous address range. This does
|
||||
* not change the position within chunks_ad, so only
|
||||
@@ -318,12 +339,16 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
|
||||
*/
|
||||
extent_tree_szad_remove(chunks_szad, prev);
|
||||
extent_tree_ad_remove(chunks_ad, prev);
|
||||
|
||||
arena_chunk_dirty_maybe_remove(arena, prev, dirty);
|
||||
extent_tree_szad_remove(chunks_szad, node);
|
||||
node->addr = prev->addr;
|
||||
node->size += prev->size;
|
||||
node->zeroed = (node->zeroed && prev->zeroed);
|
||||
arena_chunk_dirty_maybe_remove(arena, node, dirty);
|
||||
extent_node_addr_set(node, extent_node_addr_get(prev));
|
||||
extent_node_size_set(node, extent_node_size_get(node) +
|
||||
extent_node_size_get(prev));
|
||||
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
|
||||
extent_node_zeroed_get(prev));
|
||||
extent_tree_szad_insert(chunks_szad, node);
|
||||
arena_chunk_dirty_maybe_insert(arena, node, dirty);
|
||||
|
||||
arena_node_dalloc(arena, prev);
|
||||
}
|
||||
@@ -332,6 +357,28 @@ label_return:
|
||||
malloc_mutex_unlock(&arena->chunks_mtx);
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_cache(arena_t *arena, void *chunk, size_t size)
|
||||
{
|
||||
|
||||
assert(chunk != NULL);
|
||||
assert(CHUNK_ADDR2BASE(chunk) == chunk);
|
||||
assert(size != 0);
|
||||
assert((size & chunksize_mask) == 0);
|
||||
|
||||
chunk_record(arena, &arena->chunks_szad_dirty, &arena->chunks_ad_dirty,
|
||||
true, chunk, size);
|
||||
}
|
||||
|
||||
/* Default arena chunk deallocation routine in the absence of user override. */
|
||||
bool
|
||||
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
|
||||
{
|
||||
|
||||
chunk_cache(chunk_arena_get(arena_ind), chunk, size);
|
||||
return (false);
|
||||
}
|
||||
|
||||
void
|
||||
chunk_unmap(arena_t *arena, void *chunk, size_t size)
|
||||
{
|
||||
@@ -343,22 +390,13 @@ chunk_unmap(arena_t *arena, void *chunk, size_t size)
|
||||
|
||||
if (have_dss && chunk_in_dss(chunk)) {
|
||||
chunk_record(arena, &arena->chunks_szad_dss,
|
||||
&arena->chunks_ad_dss, chunk, size);
|
||||
&arena->chunks_ad_dss, false, chunk, size);
|
||||
} else if (chunk_dalloc_mmap(chunk, size)) {
|
||||
chunk_record(arena, &arena->chunks_szad_mmap,
|
||||
&arena->chunks_ad_mmap, chunk, size);
|
||||
&arena->chunks_ad_mmap, false, chunk, size);
|
||||
}
|
||||
}
|
||||
|
||||
/* Default arena chunk deallocation routine in the absence of user override. */
|
||||
bool
|
||||
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
|
||||
{
|
||||
|
||||
chunk_unmap(chunk_arena_get(arena_ind), chunk, size);
|
||||
return (false);
|
||||
}
|
||||
|
||||
static rtree_node_elm_t *
|
||||
chunks_rtree_node_alloc(size_t nelms)
|
||||
{
|
||||
|
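chunk_record() above now takes the target tree pair plus a dirty flag, and it coalesces the recorded chunk with address-adjacent neighbors in both directions while keeping the dirty ring and ndirty accounting in sync via arena_chunk_dirty_maybe_remove()/insert(). As a reference for the coalescing part only, here is a small standalone sketch using a plain address-ordered singly linked list; extent_t and extent_record() are simplified stand-ins, not jemalloc's structures (which use red-black trees keyed by size/address and by address).

/*
 * Sketch of forward/backward coalescing of freed ranges kept in address
 * order, in the spirit of chunk_record() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct extent_s {
	struct extent_s *next;	/* Address-ordered singly linked list. */
	uintptr_t addr;
	size_t size;
	bool zeroed;
} extent_t;

/* Insert the freed range [addr, addr + size) and merge with any neighbors. */
static void
extent_record(extent_t **head, uintptr_t addr, size_t size, bool zeroed)
{
	extent_t *prev = NULL, *succ = *head, *node;

	/* Find the predecessor/successor in address order. */
	while (succ != NULL && succ->addr < addr) {
		prev = succ;
		succ = succ->next;
	}

	if (succ != NULL && addr + size == succ->addr) {
		/* Coalesce forward into the successor. */
		succ->addr = addr;
		succ->size += size;
		succ->zeroed = succ->zeroed && zeroed;
		node = succ;
	} else {
		/* No forward neighbor; insert a new node. */
		node = malloc(sizeof(*node));
		if (node == NULL)
			return;	/* Leak the range, as chunk_record() may. */
		node->addr = addr;
		node->size = size;
		node->zeroed = zeroed;
		node->next = succ;
		if (prev == NULL)
			*head = node;
		else
			prev->next = node;
	}

	if (prev != NULL && prev->addr + prev->size == node->addr) {
		/* Coalesce backward into the predecessor. */
		prev->size += node->size;
		prev->zeroed = prev->zeroed && node->zeroed;
		prev->next = node->next;
		free(node);
	}
}

int
main(void)
{
	extent_t *head = NULL;

	extent_record(&head, 0x1000, 0x1000, true);
	extent_record(&head, 0x3000, 0x1000, false);
	extent_record(&head, 0x2000, 0x1000, true);	/* Bridges both. */

	for (extent_t *e = head; e != NULL; e = e->next) {
		printf("[%#zx, %#zx) zeroed=%d\n", (size_t)e->addr,
		    (size_t)(e->addr + e->size), (int)e->zeroed);
	}
	return (0);
}

The third call bridges the first two ranges, collapsing the list to a single [0x1000, 0x4000) extent whose zeroed flag is the logical AND of its parts.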
@@ -133,8 +133,12 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 				/* Success. */
 				dss_max = dss_next;
 				malloc_mutex_unlock(&dss_mtx);
-				if (cpad_size != 0)
-					chunk_unmap(arena, cpad, cpad_size);
+				if (cpad_size != 0) {
+					chunk_record(arena,
+					    &arena->chunks_szad_dss,
+					    &arena->chunks_ad_dss, false, cpad,
+					    cpad_size);
+				}
 				if (*zero) {
 					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
 					    ret, size);
src/extent.c (12 lines changed)
@@ -7,13 +7,13 @@ JEMALLOC_INLINE_C int
 extent_szad_comp(extent_node_t *a, extent_node_t *b)
 {
 	int ret;
-	size_t a_size = a->size;
-	size_t b_size = b->size;
+	size_t a_size = extent_node_size_get(a);
+	size_t b_size = extent_node_size_get(b);

 	ret = (a_size > b_size) - (a_size < b_size);
 	if (ret == 0) {
-		uintptr_t a_addr = (uintptr_t)a->addr;
-		uintptr_t b_addr = (uintptr_t)b->addr;
+		uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+		uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

 		ret = (a_addr > b_addr) - (a_addr < b_addr);
 	}
@@ -28,8 +28,8 @@ rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
 JEMALLOC_INLINE_C int
 extent_ad_comp(extent_node_t *a, extent_node_t *b)
 {
-	uintptr_t a_addr = (uintptr_t)a->addr;
-	uintptr_t b_addr = (uintptr_t)b->addr;
+	uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+	uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);

 	return ((a_addr > b_addr) - (a_addr < b_addr));
 }
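The comparator changes above are typical of the extent_node_t accessor refactor: direct field accesses are replaced by trivial get/set functions so the node layout can later change without touching every caller. A hypothetical standalone illustration of the pattern follows; the function names mirror the patch, but the struct layout shown here is assumed and omits fields the real node carries (owning arena, achunk flag, prof_tctx, dirty-ring linkage, tree links).

#include <stdbool.h>
#include <stddef.h>

/* Assumed layout for illustration only; not jemalloc's extent_node_t. */
typedef struct extent_node_s {
	void	*addr;		/* Extent base address. */
	size_t	size;		/* Extent size in bytes. */
	bool	zeroed;		/* Pages known to be zero-filled. */
} extent_node_t;

static inline void *
extent_node_addr_get(const extent_node_t *node)
{
	return (node->addr);
}

static inline void
extent_node_addr_set(extent_node_t *node, void *addr)
{
	node->addr = addr;
}

static inline size_t
extent_node_size_get(const extent_node_t *node)
{
	return (node->size);
}

static inline void
extent_node_size_set(extent_node_t *node, size_t size)
{
	node->size = size;
}

static inline bool
extent_node_zeroed_get(const extent_node_t *node)
{
	return (node->zeroed);
}

static inline void
extent_node_zeroed_set(extent_node_t *node, bool zeroed)
{
	node->zeroed = zeroed;
}

int
main(void)
{
	extent_node_t node;

	extent_node_addr_set(&node, (void *)0x1000);
	extent_node_size_set(&node, 4096);
	extent_node_zeroed_set(&node, true);
	return (extent_node_addr_get(&node) != NULL &&
	    extent_node_size_get(&node) == 4096 &&
	    extent_node_zeroed_get(&node) ? 0 : 1);
}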
src/huge.c (61 lines changed)
@@ -9,7 +9,7 @@ huge_node_get(const void *ptr)
|
||||
extent_node_t *node;
|
||||
|
||||
node = chunk_lookup(ptr);
|
||||
assert(node->size != 0);
|
||||
assert(!extent_node_achunk_get(node));
|
||||
|
||||
return (node);
|
||||
}
|
||||
@@ -18,8 +18,8 @@ static bool
|
||||
huge_node_set(const void *ptr, extent_node_t *node)
|
||||
{
|
||||
|
||||
assert(node->addr == ptr);
|
||||
assert(node->size != 0);
|
||||
assert(extent_node_addr_get(node) == ptr);
|
||||
assert(!extent_node_achunk_get(node));
|
||||
return (chunk_register(ptr, node));
|
||||
}
|
||||
|
||||
@@ -73,10 +73,11 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
node->addr = ret;
|
||||
node->size = usize;
|
||||
node->zeroed = is_zeroed;
|
||||
node->arena = arena;
|
||||
extent_node_arena_set(node, arena);
|
||||
extent_node_addr_set(node, ret);
|
||||
extent_node_size_set(node, usize);
|
||||
extent_node_achunk_set(node, false);
|
||||
extent_node_zeroed_set(node, is_zeroed);
|
||||
|
||||
if (huge_node_set(ret, node)) {
|
||||
arena_chunk_dalloc_huge(arena, ret, usize);
|
||||
@@ -152,13 +153,13 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
|
||||
zeroed = true;
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
/* Update the size of the huge allocation. */
|
||||
assert(node->size != usize);
|
||||
node->size = usize;
|
||||
/* Clear node->zeroed if zeroing failed above. */
|
||||
node->zeroed = (node->zeroed && zeroed);
|
||||
assert(extent_node_size_get(node) != usize);
|
||||
extent_node_size_set(node, usize);
|
||||
/* Clear node's zeroed field if zeroing failed above. */
|
||||
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
|
||||
@@ -195,12 +196,12 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
|
||||
}
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
/* Update the size of the huge allocation. */
|
||||
node->size = usize;
|
||||
/* Clear node->zeroed if zeroing failed above. */
|
||||
node->zeroed = (node->zeroed && zeroed);
|
||||
extent_node_size_set(node, usize);
|
||||
/* Clear node's zeroed field if zeroing failed above. */
|
||||
extent_node_zeroed_set(node, extent_node_zeroed_get(node) && zeroed);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
/* Zap the excess chunks. */
|
||||
@@ -221,9 +222,9 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
|
||||
}
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
is_zeroed_subchunk = node->zeroed;
|
||||
is_zeroed_subchunk = extent_node_zeroed_get(node);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
/*
|
||||
@@ -238,7 +239,7 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
|
||||
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
/* Update the size of the huge allocation. */
|
||||
node->size = usize;
|
||||
extent_node_size_set(node, usize);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
if (zero || (config_fill && unlikely(opt_zero))) {
|
||||
@@ -358,14 +359,16 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
|
||||
arena_t *arena;
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
huge_node_unset(ptr, node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
ql_remove(&arena->huge, node, ql_link);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
huge_dalloc_junk(node->addr, node->size);
|
||||
arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
|
||||
huge_dalloc_junk(extent_node_addr_get(node),
|
||||
extent_node_size_get(node));
|
||||
arena_chunk_dalloc_huge(extent_node_arena_get(node),
|
||||
extent_node_addr_get(node), extent_node_size_get(node));
|
||||
idalloctm(tsd, node, tcache, true);
|
||||
}
|
||||
|
||||
@@ -373,7 +376,7 @@ arena_t *
|
||||
huge_aalloc(const void *ptr)
|
||||
{
|
||||
|
||||
return (huge_node_get(ptr)->arena);
|
||||
return (extent_node_arena_get(huge_node_get(ptr)));
|
||||
}
|
||||
|
||||
size_t
|
||||
@@ -384,9 +387,9 @@ huge_salloc(const void *ptr)
|
||||
arena_t *arena;
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
size = node->size;
|
||||
size = extent_node_size_get(node);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
return (size);
|
||||
@@ -400,9 +403,9 @@ huge_prof_tctx_get(const void *ptr)
|
||||
arena_t *arena;
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
tctx = node->prof_tctx;
|
||||
tctx = extent_node_prof_tctx_get(node);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
|
||||
return (tctx);
|
||||
@@ -415,8 +418,8 @@ huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
|
||||
arena_t *arena;
|
||||
|
||||
node = huge_node_get(ptr);
|
||||
arena = node->arena;
|
||||
arena = extent_node_arena_get(node);
|
||||
malloc_mutex_lock(&arena->huge_mtx);
|
||||
node->prof_tctx = tctx;
|
||||
extent_node_prof_tctx_set(node, tctx);
|
||||
malloc_mutex_unlock(&arena->huge_mtx);
|
||||
}
|
@@ -103,7 +103,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		/* Lock the arena bin associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
-		arena_t *bin_arena = chunk->node.arena;
+		arena_t *bin_arena = extent_node_arena_get(&chunk->node);
 		arena_bin_t *bin = &bin_arena->bins[binind];

 		if (config_prof && bin_arena == arena) {
@@ -125,7 +125,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->node.arena == bin_arena) {
+			if (extent_node_arena_get(&chunk->node) == bin_arena) {
 				size_t pageind = ((uintptr_t)ptr -
 				    (uintptr_t)chunk) >> LG_PAGE;
 				arena_chunk_map_bits_t *bitselm =
@@ -183,7 +183,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 		/* Lock the arena associated with the first object. */
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
 		    tbin->avail[0]);
-		arena_t *locked_arena = chunk->node.arena;
+		arena_t *locked_arena = extent_node_arena_get(&chunk->node);
 		UNUSED bool idump;

 		if (config_prof)
@@ -209,7 +209,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
 			ptr = tbin->avail[i];
 			assert(ptr != NULL);
 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-			if (chunk->node.arena == locked_arena) {
+			if (extent_node_arena_get(&chunk->node) ==
+			    locked_arena) {
 				arena_dalloc_large_junked_locked(locked_arena,
 				    chunk, ptr);
 			} else {