Rename extent_node_t to extent_t.

commit a7a6f5bc96 (parent 3aea827f5e)
Author: Jason Evans
Date: 2016-03-23 21:09:28 -07:00

14 changed files with 490 additions and 485 deletions
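The change is almost entirely mechanical: the extent_node_t type becomes extent_t, and every extent_node_* function drops the _node infix. A minimal before/after sketch of the naming pattern, declarations only (the real struct also carries tree/list linkage and metadata fields not shown here):

/* Before this commit: */
typedef struct extent_node_s extent_node_t;
size_t	extent_node_size_get(const extent_node_t *node);
void	*extent_node_addr_get(const extent_node_t *node);

/* After this commit: */
typedef struct extent_s extent_t;
size_t	extent_size_get(const extent_t *extent);
void	*extent_addr_get(const extent_t *extent);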

src/arena.c

@@ -214,32 +214,32 @@ arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
}
static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
arena_chunk_dirty_npages(const extent_t *extent)
{
return (extent_node_size_get(node) >> LG_PAGE);
return (extent_size_get(extent) >> LG_PAGE);
}
void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
{
if (cache) {
extent_node_dirty_linkage_init(node);
extent_node_dirty_insert(node, &arena->runs_dirty,
extent_dirty_linkage_init(extent);
extent_dirty_insert(extent, &arena->runs_dirty,
&arena->chunks_cache);
arena->ndirty += arena_chunk_dirty_npages(node);
arena->ndirty += arena_chunk_dirty_npages(extent);
}
}
void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent, bool dirty)
{
if (dirty) {
extent_node_dirty_remove(node);
assert(arena->ndirty >= arena_chunk_dirty_npages(node));
arena->ndirty -= arena_chunk_dirty_npages(node);
extent_dirty_remove(extent);
assert(arena->ndirty >= arena_chunk_dirty_npages(extent));
arena->ndirty -= arena_chunk_dirty_npages(extent);
}
}
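Both helpers lean on arena_chunk_dirty_npages(), which assumes extent sizes are exact multiples of the page size, so a right shift converts bytes to pages. A standalone equivalent; LG_PAGE = 12 (4 KiB pages) is an illustrative assumption, since jemalloc resolves the real value at configure time:

#include <assert.h>
#include <stddef.h>

#define	LG_PAGE	12	/* assumption: 4 KiB pages */

static size_t
dirty_npages(size_t extent_size)
{
	/* Extent sizes are always page multiples, so the shift is exact. */
	assert((extent_size & (((size_t)1 << LG_PAGE) - 1)) == 0);
	return (extent_size >> LG_PAGE);
}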
@@ -516,14 +516,14 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
{
/*
* The extent node notion of "committed" doesn't directly apply to
* arena chunks. Arbitrarily mark them as committed. The commit state
* of runs is tracked individually, and upon chunk deallocation the
* entire chunk is in a consistent commit state.
* The extent notion of "committed" doesn't directly apply to arena
* chunks. Arbitrarily mark them as committed. The commit state of
* runs is tracked individually, and upon chunk deallocation the entire
* chunk is in a consistent commit state.
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node));
extent_init(&chunk->extent, arena, chunk, chunksize, zero, true);
extent_achunk_set(&chunk->extent, true);
return (chunk_register(tsdn, chunk, &chunk->extent));
}
static arena_chunk_t *
@@ -648,8 +648,8 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
return (NULL);
}
ql_elm_new(&chunk->node, ql_link);
ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
ql_elm_new(&chunk->extent, ql_link);
ql_tail_insert(&arena->achunks, &chunk->extent, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
@@ -661,7 +661,7 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
chunk_deregister(chunk, &chunk->extent);
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
@@ -718,7 +718,7 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
ql_remove(&arena->achunks, &chunk->node, ql_link);
ql_remove(&arena->achunks, &chunk->extent, ql_link);
spare = arena->spare;
arena->spare = chunk;
if (spare != NULL)
@@ -805,30 +805,30 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
arena_huge_malloc_stats_update_undo(arena, usize);
}
extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
extent_t *
arena_extent_alloc(tsdn_t *tsdn, arena_t *arena)
{
extent_node_t *node;
extent_t *extent;
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (base_alloc(tsdn, sizeof(extent_node_t)));
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
extent = ql_last(&arena->extent_cache, ql_link);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
return (base_alloc(tsdn, sizeof(extent_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (node);
ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
return (extent);
}
void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
{
malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->extent_cache, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
}
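The renamed arena_extent_alloc()/arena_extent_dalloc() pair is a mutex-protected cache of extent_t headers that falls back to base_alloc() when the cache is empty. A self-contained approximation of that design, with pthreads and malloc() standing in for jemalloc's mutex and base allocator, and a plain next pointer standing in for the ql linkage:

#include <pthread.h>
#include <stdlib.h>

typedef struct extent_s extent_t;
struct extent_s {
	extent_t	*next;	/* stand-in for jemalloc's ql_link field */
};

static pthread_mutex_t extent_cache_mtx = PTHREAD_MUTEX_INITIALIZER;
static extent_t *extent_cache = NULL;

static extent_t *
extent_cache_alloc(void)
{
	extent_t *extent;

	pthread_mutex_lock(&extent_cache_mtx);
	extent = extent_cache;
	if (extent == NULL) {
		/* Cache empty; fall back (base_alloc() in jemalloc). */
		pthread_mutex_unlock(&extent_cache_mtx);
		return (malloc(sizeof(extent_t)));
	}
	extent_cache = extent->next;
	pthread_mutex_unlock(&extent_cache_mtx);
	return (extent);
}

static void
extent_cache_dalloc(extent_t *extent)
{
	pthread_mutex_lock(&extent_cache_mtx);
	extent->next = extent_cache;
	extent_cache = extent;
	pthread_mutex_unlock(&extent_cache_mtx);
}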
static void *
@@ -1424,7 +1424,7 @@ arena_dirty_count(arena_t *arena)
{
size_t ndirty = 0;
arena_runs_dirty_link_t *rdelm;
extent_node_t *chunkselm;
extent_t *chunkselm;
for (rdelm = qr_next(&arena->runs_dirty, rd_link),
chunkselm = qr_next(&arena->chunks_cache, cc_link);
@@ -1432,7 +1432,7 @@ arena_dirty_count(arena_t *arena)
size_t npages;
if (rdelm == &chunkselm->rd) {
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
npages = extent_size_get(chunkselm) >> LG_PAGE;
chunkselm = qr_next(chunkselm, cc_link);
} else {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
@@ -1456,10 +1456,10 @@ arena_dirty_count(arena_t *arena)
static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
extent_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
extent_t *chunkselm;
size_t nstashed = 0;
/* Stash runs/chunks according to ndirty_limit. */
@@ -1470,11 +1470,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
rdelm_next = qr_next(rdelm, rd_link);
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next;
extent_t *chunkselm_next;
bool zero;
UNUSED void *chunk;
npages = extent_node_size_get(chunkselm) >> LG_PAGE;
npages = extent_size_get(chunkselm) >> LG_PAGE;
if (opt_purge == purge_mode_decay && arena->ndirty -
(nstashed + npages) < ndirty_limit)
break;
@@ -1482,18 +1482,18 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
chunkselm_next = qr_next(chunkselm, cc_link);
/*
* Allocate. chunkselm remains valid due to the
* dalloc_node=false argument to chunk_alloc_cache().
* dalloc_extent=false argument to chunk_alloc_cache().
*/
zero = false;
chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
extent_addr_get(chunkselm),
extent_size_get(chunkselm), chunksize, &zero,
false);
assert(chunk == extent_node_addr_get(chunkselm));
assert(zero == extent_node_zeroed_get(chunkselm));
extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
assert(chunk == extent_addr_get(chunkselm));
assert(zero == extent_zeroed_get(chunkselm));
extent_dirty_insert(chunkselm, purge_runs_sentinel,
purge_chunks_sentinel);
assert(npages == (extent_node_size_get(chunkselm) >>
assert(npages == (extent_size_get(chunkselm) >>
LG_PAGE));
chunkselm = chunkselm_next;
} else {
@@ -1546,11 +1546,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
extent_t *purge_chunks_sentinel)
{
size_t npurged, nmadvise;
arena_runs_dirty_link_t *rdelm;
extent_node_t *chunkselm;
extent_t *chunkselm;
if (config_stats)
nmadvise = 0;
@@ -1571,7 +1571,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
* decommitted, or purged, depending on chunk
* deallocation policy.
*/
size_t size = extent_node_size_get(chunkselm);
size_t size = extent_size_get(chunkselm);
npages = size >> LG_PAGE;
chunkselm = qr_next(chunkselm, cc_link);
} else {
@@ -1639,10 +1639,10 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
static void
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
extent_t *purge_chunks_sentinel)
{
arena_runs_dirty_link_t *rdelm, *rdelm_next;
extent_node_t *chunkselm;
extent_t *chunkselm;
/* Deallocate chunks/runs. */
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
@@ -1650,14 +1650,13 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
rdelm_next = qr_next(rdelm, rd_link);
if (rdelm == &chunkselm->rd) {
extent_node_t *chunkselm_next = qr_next(chunkselm,
cc_link);
void *addr = extent_node_addr_get(chunkselm);
size_t size = extent_node_size_get(chunkselm);
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
arena_node_dalloc(tsdn, arena, chunkselm);
extent_t *chunkselm_next = qr_next(chunkselm, cc_link);
void *addr = extent_addr_get(chunkselm);
size_t size = extent_size_get(chunkselm);
bool zeroed = extent_zeroed_get(chunkselm);
bool committed = extent_committed_get(chunkselm);
extent_dirty_remove(chunkselm);
arena_extent_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
size, zeroed, committed);
@@ -1692,7 +1691,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
extent_t purge_chunks_sentinel;
arena->purging = true;
@@ -1708,7 +1707,7 @@ arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
extent_dirty_linkage_init(&purge_chunks_sentinel);
npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel);
@@ -1783,7 +1782,7 @@ void
arena_reset(tsd_t *tsd, arena_t *arena)
{
unsigned i;
extent_node_t *node;
extent_t *extent;
/*
* Locking in this function is unintuitive. The caller guarantees that
@@ -1801,9 +1800,9 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Remove large allocations from prof sample set. */
if (config_prof && opt_prof) {
ql_foreach(node, &arena->achunks, ql_link) {
ql_foreach(extent, &arena->achunks, ql_link) {
arena_achunk_prof_reset(tsd, arena,
extent_node_addr_get(node));
extent_addr_get(extent));
}
}
@@ -1815,9 +1814,9 @@ arena_reset(tsd_t *tsd, arena_t *arena)
/* Huge allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
for (extent = ql_last(&arena->huge, ql_link); extent != NULL; extent =
ql_last(&arena->huge, ql_link)) {
void *ptr = extent_node_addr_get(node);
void *ptr = extent_addr_get(extent);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
@@ -1854,18 +1853,18 @@ arena_reset(tsd_t *tsd, arena_t *arena)
* chains directly correspond.
*/
qr_new(&arena->runs_dirty, rd_link);
for (node = qr_next(&arena->chunks_cache, cc_link);
node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
qr_new(&node->rd, rd_link);
qr_meld(&arena->runs_dirty, &node->rd, rd_link);
for (extent = qr_next(&arena->chunks_cache, cc_link);
extent != &arena->chunks_cache; extent = qr_next(extent, cc_link)) {
qr_new(&extent->rd, rd_link);
qr_meld(&arena->runs_dirty, &extent->rd, rd_link);
}
/* Arena chunks. */
for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, node, ql_link);
for (extent = ql_last(&arena->achunks, ql_link); extent != NULL; extent
= ql_last(&arena->achunks, ql_link)) {
ql_remove(&arena->achunks, extent, ql_link);
arena_chunk_discard(tsd_tsdn(tsd), arena,
extent_node_addr_get(node));
extent_addr_get(extent));
}
/* Spare. */
@@ -2649,8 +2648,8 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
if (run == bin->runcur)
bin->runcur = NULL;
else {
szind_t binind = arena_bin_index(extent_node_arena_get(
&chunk->node), bin);
szind_t binind = arena_bin_index(extent_arena_get(
&chunk->extent), bin);
const arena_bin_info_t *bin_info = &arena_bin_info[binind];
/*
@@ -3018,7 +3017,7 @@ arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = extent_node_arena_get(&chunk->node);
arena = extent_arena_get(&chunk->extent);
if (oldsize < usize_max) {
bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
@@ -3080,7 +3079,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
arena_decay_tick(tsdn, extent_arena_get(&chunk->extent));
return (false);
} else {
return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
@@ -3404,9 +3403,9 @@ arena_new(tsdn_t *tsdn, unsigned ind)
if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
ql_new(&arena->node_cache);
if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
WITNESS_RANK_ARENA_NODE_CACHE))
ql_new(&arena->extent_cache);
if (malloc_mutex_init(&arena->extent_cache_mtx, "arena_extent_cache",
WITNESS_RANK_ARENA_EXTENT_CACHE))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
@@ -3492,7 +3491,7 @@ void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
malloc_mutex_prefork(tsdn, &arena->extent_cache_mtx);
}
void
@@ -3513,7 +3512,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_parent(tsdn, &arena->lock);
}
@@ -3526,7 +3525,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->extent_cache_mtx);
malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
malloc_mutex_postfork_child(tsdn, &arena->lock);
}
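The renamed extent_cache_mtx also participates in the fork protocol above: every arena mutex is acquired before fork() and released afterward, in a fixed order, so the child never inherits a lock held by a thread that no longer exists. A reduced sketch of that pattern (names illustrative; jemalloc registers analogous hooks of its own rather than exactly this code):

#include <pthread.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t extent_cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	/* Acquire in a fixed order; postfork releases in reverse. */
	pthread_mutex_lock(&arena_lock);
	pthread_mutex_lock(&extent_cache_lock);
}

static void
postfork(void)
{
	pthread_mutex_unlock(&extent_cache_lock);
	pthread_mutex_unlock(&arena_lock);
}

/* Registered once at startup, e.g.: */
/* pthread_atfork(prefork, postfork, postfork); */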

src/base.c

@@ -6,59 +6,59 @@
static malloc_mutex_t base_mtx;
static extent_tree_t base_avail_szad;
static extent_node_t *base_nodes;
static extent_t *base_extents;
static size_t base_allocated;
static size_t base_resident;
static size_t base_mapped;
/******************************************************************************/
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
static extent_t *
base_extent_try_alloc(tsdn_t *tsdn)
{
extent_node_t *node;
extent_t *extent;
malloc_mutex_assert_owner(tsdn, &base_mtx);
if (base_nodes == NULL)
if (base_extents == NULL)
return (NULL);
node = base_nodes;
base_nodes = *(extent_node_t **)node;
return (node);
extent = base_extents;
base_extents = *(extent_t **)extent;
return (extent);
}
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
base_extent_dalloc(tsdn_t *tsdn, extent_t *extent)
{
malloc_mutex_assert_owner(tsdn, &base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
*(extent_t **)extent = base_extents;
base_extents = extent;
}
static extent_node_t *
static extent_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_node_t *node;
extent_t *extent;
size_t csize, nsize;
void *addr;
malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
extent = base_extent_try_alloc(tsdn);
/* Allocate enough space to also carve an extent out if necessary. */
nsize = (extent == NULL) ? CACHELINE_CEILING(sizeof(extent_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
base_node_dalloc(tsdn, node);
if (extent != NULL)
base_extent_dalloc(tsdn, extent);
return (NULL);
}
base_mapped += csize;
if (node == NULL) {
node = (extent_node_t *)addr;
if (extent == NULL) {
extent = (extent_t *)addr;
addr = (void *)((uintptr_t)addr + nsize);
csize -= nsize;
if (config_stats) {
@@ -66,8 +66,8 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
base_resident += PAGE_CEILING(nsize);
}
}
extent_node_init(node, NULL, addr, csize, true, true);
return (node);
extent_init(extent, NULL, addr, csize, true, true);
return (extent);
}
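base_extent_try_alloc()/base_extent_dalloc() recycle extent_t headers through an intrusive free list: the first pointer-sized word of each retired struct stores the next link, so no separate list nodes are needed. The trick in isolation; the struct layout here is a hypothetical stand-in, and only needs to be at least one pointer wide (the type-punning is kept simple for illustration):

#include <stddef.h>

typedef struct extent_s extent_t;
struct extent_s {
	void	*addr;	/* illustrative fields */
	size_t	size;
};

static extent_t *base_extents = NULL;

static void
base_extent_push(extent_t *extent)
{
	/* Reuse the retired struct's own storage as the link. */
	*(extent_t **)extent = base_extents;
	base_extents = extent;
}

static extent_t *
base_extent_pop(void)
{
	extent_t *extent = base_extents;

	if (extent == NULL)
		return (NULL);
	base_extents = *(extent_t **)extent;
	return (extent);
}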
/*
@@ -80,8 +80,8 @@ base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
extent_node_t *node;
extent_node_t key;
extent_t *extent;
extent_t key;
/*
* Round size up to nearest multiple of the cacheline size, so that
@@ -90,28 +90,28 @@ base_alloc(tsdn_t *tsdn, size_t size)
csize = CACHELINE_CEILING(size);
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
extent_init(&key, NULL, NULL, usize, false, false);
malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
extent = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (extent != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
extent_tree_szad_remove(&base_avail_szad, extent);
} else {
/* Try to allocate more space. */
node = base_chunk_alloc(tsdn, csize);
extent = base_chunk_alloc(tsdn, csize);
}
if (node == NULL) {
if (extent == NULL) {
ret = NULL;
goto label_return;
}
ret = extent_node_addr_get(node);
if (extent_node_size_get(node) > csize) {
extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
ret = extent_addr_get(extent);
if (extent_size_get(extent) > csize) {
extent_addr_set(extent, (void *)((uintptr_t)ret + csize));
extent_size_set(extent, extent_size_get(extent) - csize);
extent_tree_szad_insert(&base_avail_szad, extent);
} else
base_node_dalloc(tsdn, node);
base_extent_dalloc(tsdn, extent);
if (config_stats) {
base_allocated += csize;
/*
@@ -147,7 +147,7 @@ base_boot(void)
if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
base_nodes = NULL;
base_extents = NULL;
return (false);
}
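The base_alloc() hunk above allocates by carving csize bytes off the front of the best-fitting free extent, then reinserting whatever remains. The carve step by itself, over a simplified record whose addr/size fields stand in for the extent_t accessors:

#include <stdint.h>
#include <stddef.h>

typedef struct {
	void	*addr;
	size_t	size;
} span_t;

/* Take csize bytes from the front of a free span; the caller reinserts
 * *span into the size/address tree if any space remains (size > 0). */
static void *
span_carve(span_t *span, size_t csize)
{
	void *ret = span->addr;

	span->addr = (void *)((uintptr_t)ret + csize);
	span->size -= csize;
	return (ret);
}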

src/chunk.c

@@ -141,15 +141,15 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
}
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_t *extent)
{
assert(extent_node_addr_get(node) == chunk);
assert(extent_addr_get(extent) == chunk);
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
if (rtree_set(&chunks_rtree, (uintptr_t)chunk, extent))
return (true);
if (config_prof && opt_prof) {
size_t size = extent_node_size_get(node);
size_t size = extent_size_get(extent);
size_t nadd = (size == 0) ? 1 : size / chunksize;
size_t cur = atomic_add_z(&curchunks, nadd);
size_t high = atomic_read_z(&highchunks);
@@ -168,14 +168,14 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
}
void
chunk_deregister(const void *chunk, const extent_node_t *node)
chunk_deregister(const void *chunk, const extent_t *extent)
{
bool err;
err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
assert(!err);
if (config_prof && opt_prof) {
size_t size = extent_node_size_get(node);
size_t size = extent_size_get(extent);
size_t nsub = (size == 0) ? 1 : size / chunksize;
assert(atomic_read_z(&curchunks) >= nsub);
atomic_sub_z(&curchunks, nsub);
@@ -186,15 +186,15 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
* Do first-best-fit chunk selection, i.e. select the lowest chunk that best
* fits.
*/
static extent_node_t *
static extent_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size)
{
extent_node_t key;
extent_t key;
assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, false, false);
extent_init(&key, arena, NULL, size, false, false);
return (extent_tree_szad_nsearch(chunks_szad, &key));
}
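chunk_first_best_fit() builds a throwaway key extent of the requested size and relies on extent_tree_szad_nsearch(), which returns the smallest element that sorts greater than or equal to the key, i.e. the lowest-addressed extent of the best-fitting size. The same search expressed over a sorted array, purely as a sketch of the semantics (jemalloc uses a red-black tree, not an array):

#include <stddef.h>

/* Index of the first element >= key in a sorted array, or len if none;
 * analogous to nsearch under the szad ordering. */
static size_t
nsearch(const size_t *sorted, size_t len, size_t key)
{
	size_t lo = 0, hi = len;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (sorted[mid] < key)
			lo = mid + 1;
		else
			hi = mid;
	}
	return (lo);
}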
@@ -202,20 +202,20 @@ static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
bool dalloc_extent)
{
void *ret;
extent_node_t *node;
extent_t *extent;
size_t alloc_size, leadsize, trailsize;
bool zeroed, committed;
assert(new_addr == NULL || alignment == chunksize);
/*
* Cached chunks use the node linkage embedded in their headers, in
* which case dalloc_node is true, and new_addr is non-NULL because
* Cached chunks use the extent linkage embedded in their headers, in
* which case dalloc_extent is true, and new_addr is non-NULL because
* we're operating on a specific chunk.
*/
assert(dalloc_node || new_addr != NULL);
assert(dalloc_extent || new_addr != NULL);
alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
/* Beware size_t wrap-around. */
@@ -224,56 +224,55 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
false);
node = extent_tree_ad_search(chunks_ad, &key);
extent_t key;
extent_init(&key, arena, new_addr, alloc_size, false, false);
extent = extent_tree_ad_search(chunks_ad, &key);
} else {
node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
extent = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
if (extent == NULL || (new_addr != NULL && extent_size_get(extent) <
size)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
alignment) - (uintptr_t)extent_node_addr_get(node);
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
alignment) - (uintptr_t)extent_addr_get(extent);
assert(new_addr == NULL || leadsize == 0);
assert(extent_node_size_get(node) >= leadsize + size);
trailsize = extent_node_size_get(node) - leadsize - size;
ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
zeroed = extent_node_zeroed_get(node);
assert(extent_size_get(extent) >= leadsize + size);
trailsize = extent_size_get(extent) - leadsize - size;
ret = (void *)((uintptr_t)extent_addr_get(extent) + leadsize);
zeroed = extent_zeroed_get(extent);
if (zeroed)
*zero = true;
committed = extent_node_committed_get(node);
committed = extent_committed_get(extent);
if (committed)
*commit = true;
/* Split the lead. */
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
chunk_hooks->split(extent_addr_get(extent),
extent_size_get(extent), leadsize, size, false, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(chunks_ad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
/* Remove extent from the tree. */
extent_tree_szad_remove(chunks_szad, extent);
extent_tree_ad_remove(chunks_ad, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
extent_node_size_set(node, leadsize);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
extent_size_set(extent, leadsize);
extent_tree_szad_insert(chunks_szad, extent);
extent_tree_ad_insert(chunks_ad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
extent = NULL;
}
if (trailsize != 0) {
/* Split the trail. */
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node);
if (dalloc_extent && extent != NULL)
arena_extent_dalloc(tsdn, arena, extent);
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
chunks_ad, cache, ret, size + trailsize, zeroed,
@@ -281,9 +280,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
if (extent == NULL) {
extent = arena_extent_alloc(tsdn, arena);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
chunk_record(tsdn, arena, chunk_hooks,
chunks_szad, chunks_ad, cache, ret, size +
@@ -291,12 +290,12 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
return (NULL);
}
}
extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
extent_init(extent, arena, (void *)((uintptr_t)(ret) + size),
trailsize, zeroed, committed);
extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(chunks_ad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
node = NULL;
extent_tree_szad_insert(chunks_szad, extent);
extent_tree_ad_insert(chunks_ad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
extent = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
@@ -306,9 +305,9 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
arena_node_dalloc(tsdn, arena, node);
assert(dalloc_extent || extent != NULL);
if (dalloc_extent && extent != NULL)
arena_extent_dalloc(tsdn, arena, extent);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
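The lead/trail arithmetic in chunk_recycle() above is easy to restate in isolation: the lead is whatever padding it takes to reach the requested alignment, and the trail is what remains after the aligned allocation. A sketch, assuming alignment is a power of two (which jemalloc guarantees):

#include <stdint.h>
#include <stddef.h>

/* What jemalloc's ALIGNMENT_CEILING() computes: round addr up to a
 * multiple of alignment (a power of two). */
static uintptr_t
alignment_ceiling(uintptr_t addr, size_t alignment)
{
	return ((addr + (alignment - 1)) & ~((uintptr_t)alignment - 1));
}

/* Given a candidate extent [addr, addr + extsize) and a request for size
 * bytes, compute the unused lead and trail that get reinserted as smaller
 * extents. Assumes extsize >= leadsize + size, as chunk_recycle() asserts. */
static void
split_sizes(uintptr_t addr, size_t extsize, size_t size, size_t alignment,
    size_t *leadsize, size_t *trailsize)
{
	*leadsize = (size_t)(alignment_ceiling(addr, alignment) - addr);
	*trailsize = extsize - *leadsize - size;
}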
@@ -381,7 +380,8 @@ chunk_alloc_base(size_t size)
void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
void *new_addr, size_t size, size_t alignment, bool *zero,
bool dalloc_extent)
{
void *ret;
bool commit;
@@ -394,7 +394,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
commit = true;
ret = chunk_recycle(tsdn, arena, chunk_hooks,
&arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
new_addr, size, alignment, zero, &commit, dalloc_node);
new_addr, size, alignment, zero, &commit, dalloc_extent);
if (ret == NULL)
return (NULL);
assert(commit);
@@ -480,40 +480,39 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
void *chunk, size_t size, bool zeroed, bool committed)
{
bool unzeroed;
extent_node_t *node, *prev;
extent_node_t key;
extent_t *extent, *prev;
extent_t key;
assert(!cache || !zeroed);
unzeroed = cache || !zeroed;
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
extent_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, false,
false);
extent = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && extent_node_addr_get(node) ==
extent_node_addr_get(&key) && extent_node_committed_get(node) ==
committed && !chunk_hooks->merge(chunk, size,
extent_node_addr_get(node), extent_node_size_get(node), false,
arena->ind)) {
if (extent != NULL && extent_addr_get(extent) == extent_addr_get(&key)
&& extent_committed_get(extent) == committed &&
!chunk_hooks->merge(chunk, size, extent_addr_get(extent),
extent_size_get(extent), false, arena->ind)) {
/*
* Coalesce chunk with the following address range. This does
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, chunk);
extent_node_size_set(node, size + extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
extent_tree_szad_remove(chunks_szad, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
extent_addr_set(extent, chunk);
extent_size_set(extent, size + extent_size_get(extent));
extent_zeroed_set(extent, extent_zeroed_get(extent) &&
!unzeroed);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
extent_tree_szad_insert(chunks_szad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
/* Coalescing forward failed, so insert a new extent. */
extent = arena_extent_alloc(tsdn, arena);
if (extent == NULL) {
/*
* Node allocation failed, which is an exceedingly
* unlikely failure. Leak chunk after making sure its
@@ -526,39 +525,38 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
}
goto label_return;
}
extent_node_init(node, arena, chunk, size, !unzeroed,
extent_init(extent, arena, chunk, size, !unzeroed,
committed);
extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
extent_tree_ad_insert(chunks_ad, extent);
extent_tree_szad_insert(chunks_szad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
}
/* Try to coalesce backward. */
prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
extent_node_size_get(prev)) == chunk &&
extent_node_committed_get(prev) == committed &&
!chunk_hooks->merge(extent_node_addr_get(prev),
extent_node_size_get(prev), chunk, size, false, arena->ind)) {
prev = extent_tree_ad_prev(chunks_ad, extent);
if (prev != NULL && (void *)((uintptr_t)extent_addr_get(prev) +
extent_size_get(prev)) == chunk && extent_committed_get(prev) ==
committed && !chunk_hooks->merge(extent_addr_get(prev),
extent_size_get(prev), chunk, size, false, arena->ind)) {
/*
* Coalesce chunk with the previous address range. This does
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
* remove/insert extent from/into chunks_szad.
*/
extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(chunks_ad, prev);
arena_chunk_cache_maybe_remove(arena, prev, cache);
extent_tree_szad_remove(chunks_szad, node);
arena_chunk_cache_maybe_remove(arena, node, cache);
extent_node_addr_set(node, extent_node_addr_get(prev));
extent_node_size_set(node, extent_node_size_get(prev) +
extent_node_size_get(node));
extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
extent_node_zeroed_get(node));
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
extent_tree_szad_remove(chunks_szad, extent);
arena_chunk_cache_maybe_remove(arena, extent, cache);
extent_addr_set(extent, extent_addr_get(prev));
extent_size_set(extent, extent_size_get(prev) +
extent_size_get(extent));
extent_zeroed_set(extent, extent_zeroed_get(prev) &&
extent_zeroed_get(extent));
extent_tree_szad_insert(chunks_szad, extent);
arena_chunk_cache_maybe_insert(arena, extent, cache);
arena_node_dalloc(tsdn, arena, prev);
arena_extent_dalloc(tsdn, arena, prev);
}
label_return:

src/extent.c

@@ -15,11 +15,11 @@ extent_quantize(size_t size)
}
JEMALLOC_INLINE_C int
extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
extent_szad_comp(const extent_t *a, const extent_t *b)
{
int ret;
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
size_t a_qsize = extent_quantize(extent_size_get(a));
size_t b_qsize = extent_quantize(extent_size_get(b));
/*
* Compare based on quantized size rather than size, in order to sort
@@ -27,8 +27,8 @@ extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
*/
ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);
if (ret == 0) {
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
ret = (a_addr > b_addr) - (a_addr < b_addr);
}
@@ -37,17 +37,17 @@ extent_szad_comp(const extent_node_t *a, const extent_node_t *b)
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, szad_link,
rb_gen(, extent_tree_szad_, extent_tree_t, extent_t, szad_link,
extent_szad_comp)
JEMALLOC_INLINE_C int
extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
extent_ad_comp(const extent_t *a, const extent_t *b)
{
uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
return ((a_addr > b_addr) - (a_addr < b_addr));
}
/* Generate red-black tree functions. */
rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
rb_gen(, extent_tree_ad_, extent_tree_t, extent_t, ad_link, extent_ad_comp)
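The szad ordering is what lets one tree serve both lookups: the primary key is quantized size, so best-fit searches find the smallest adequate extent, and the secondary key is address, so ties resolve to the lowest address, yielding the "first best fit" policy. The comparator logic reduced to plain keys (the real extent_szad_comp() derives the quantized sizes via extent_quantize(extent_size_get(...))):

#include <stddef.h>
#include <stdint.h>

static int
szad_comp(size_t a_qsize, uintptr_t a_addr, size_t b_qsize, uintptr_t b_addr)
{
	/* (a > b) - (a < b) yields -1, 0, or 1 without overflow risk. */
	int ret = (a_qsize > b_qsize) - (a_qsize < b_qsize);

	if (ret == 0)
		ret = (a_addr > b_addr) - (a_addr < b_addr);
	return (ret);
}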

src/huge.c

@@ -3,40 +3,40 @@
/******************************************************************************/
static extent_node_t *
huge_node_get(const void *ptr)
static extent_t *
huge_extent_get(const void *ptr)
{
extent_node_t *node;
extent_t *extent;
node = chunk_lookup(ptr, true);
assert(!extent_node_achunk_get(node));
extent = chunk_lookup(ptr, true);
assert(!extent_achunk_get(extent));
return (node);
return (extent);
}
static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_extent_set(tsdn_t *tsdn, const void *ptr, extent_t *extent)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(tsdn, ptr, node));
assert(extent_addr_get(extent) == ptr);
assert(!extent_achunk_get(extent));
return (chunk_register(tsdn, ptr, extent));
}
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_extent_reset(tsdn_t *tsdn, const void *ptr, extent_t *extent)
{
bool err;
err = huge_node_set(tsdn, ptr, node);
err = huge_extent_set(tsdn, ptr, extent);
assert(!err);
}
static void
huge_node_unset(const void *ptr, const extent_node_t *node)
huge_extent_unset(const void *ptr, const extent_t *extent)
{
chunk_deregister(ptr, node);
chunk_deregister(ptr, extent);
}
void *
@@ -54,7 +54,7 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
{
void *ret;
size_t ausize;
extent_node_t *node;
extent_t *extent;
bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@@ -66,10 +66,10 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return (NULL);
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
/* Allocate an extent with which to track the chunk. */
extent = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_t)),
CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
if (node == NULL)
if (extent == NULL)
return (NULL);
/*
@@ -81,22 +81,22 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
idalloctm(tsdn, node, NULL, true, true);
idalloctm(tsdn, extent, NULL, true, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, is_zeroed, true);
extent_init(extent, arena, ret, usize, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) {
if (huge_extent_set(tsdn, ret, extent)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
idalloctm(tsdn, node, NULL, true, true);
idalloctm(tsdn, extent, NULL, true, true);
return (NULL);
}
/* Insert node into huge. */
/* Insert extent into huge. */
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
ql_elm_new(extent, ql_link);
ql_tail_insert(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
@@ -137,7 +137,7 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
extent_t *extent;
arena_t *arena;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool pre_zeroed, post_zeroed;
@@ -150,9 +150,9 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
if (oldsize == usize)
return;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
pre_zeroed = extent_zeroed_get(extent);
/* Fill if necessary (shrinking). */
if (oldsize > usize) {
@@ -171,12 +171,12 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
assert(extent_size_get(extent) != usize);
huge_extent_unset(ptr, extent);
extent_size_set(extent, usize);
huge_extent_reset(tsdn, ptr, extent);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
extent_zeroed_set(extent, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
@@ -199,15 +199,15 @@ static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize)
{
extent_node_t *node;
extent_t *extent;
arena_t *arena;
chunk_hooks_t chunk_hooks;
size_t cdiff;
bool pre_zeroed, post_zeroed;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
pre_zeroed = extent_zeroed_get(extent);
chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
@@ -235,11 +235,11 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
huge_extent_unset(ptr, extent);
extent_size_set(extent, usize);
huge_extent_reset(tsdn, ptr, extent);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
extent_zeroed_set(extent, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
@@ -250,15 +250,16 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize, bool zero) {
extent_node_t *node;
size_t usize, bool zero)
{
extent_t *extent;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
is_zeroed_subchunk = extent_zeroed_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/*
@@ -273,9 +274,9 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
huge_extent_unset(ptr, extent);
extent_size_set(extent, usize);
huge_extent_reset(tsdn, ptr, extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
@@ -390,21 +391,21 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
void
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
huge_extent_unset(ptr, extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
ql_remove(&arena->huge, extent, ql_link);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
huge_dalloc_junk(tsdn, extent_node_addr_get(node),
extent_node_size_get(node));
arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
idalloctm(tsdn, node, NULL, true, true);
huge_dalloc_junk(tsdn, extent_addr_get(extent),
extent_size_get(extent));
arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
extent_addr_get(extent), extent_size_get(extent));
idalloctm(tsdn, extent, NULL, true, true);
arena_decay_tick(tsdn, arena);
}
@@ -413,20 +414,20 @@ arena_t *
huge_aalloc(const void *ptr)
{
return (extent_node_arena_get(huge_node_get(ptr)));
return (extent_arena_get(huge_extent_get(ptr)));
}
size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
size_t size;
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
size = extent_node_size_get(node);
size = extent_size_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (size);
@@ -436,13 +437,13 @@ prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
tctx = extent_prof_tctx_get(extent);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (tctx);
@@ -451,13 +452,13 @@ huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
extent_t *extent;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
extent = huge_extent_get(ptr);
arena = extent_arena_get(extent);
malloc_mutex_lock(tsdn, &arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
extent_prof_tctx_set(extent, tctx);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

src/tcache.c

@@ -103,7 +103,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
/* Lock the arena bin associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1));
arena_t *bin_arena = extent_node_arena_get(&chunk->node);
arena_t *bin_arena = extent_arena_get(&chunk->extent);
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
@@ -126,7 +126,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) == bin_arena) {
if (extent_arena_get(&chunk->extent) == bin_arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
@@ -185,7 +185,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
/* Lock the arena associated with the first object. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
*(tbin->avail - 1));
arena_t *locked_arena = extent_node_arena_get(&chunk->node);
arena_t *locked_arena = extent_arena_get(&chunk->extent);
UNUSED bool idump;
if (config_prof)
@@ -211,8 +211,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
ptr = *(tbin->avail - 1 - i);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
if (extent_arena_get(&chunk->extent) == locked_arena) {
arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
locked_arena, chunk, ptr);
} else {
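Both flush paths recover an object's owning arena the same way: CHUNK_ADDR2BASE() masks the pointer down to its naturally aligned chunk header, and the extent_t embedded there (chunk->extent after this commit) names the arena. A sketch of the masking step, assuming 2 MiB chunks (the jemalloc 4.x default; the real chunk size is configurable):

#include <stdint.h>

#define	LG_CHUNK	21	/* assumption: 2 MiB chunks */
#define	CHUNK_SIZE	((uintptr_t)1 << LG_CHUNK)

/* Chunks are naturally aligned, so clearing the low bits of any interior
 * pointer yields the chunk base, where the header and its extent_t live. */
static void *
chunk_addr2base(const void *ptr)
{
	return ((void *)((uintptr_t)ptr & ~(CHUNK_SIZE - 1)));
}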