Refactor chunk_dalloc_{cache,wrapper}() to take extent arguments.

Rename arena_extent_[d]alloc() to extent_[d]alloc().

Move all chunk [de]registration responsibility into chunk.c.
Jason Evans 2016-05-23 14:56:35 -07:00
parent de0305a7f3
commit 6c94470822
10 changed files with 148 additions and 199 deletions
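
In short, the deallocation path now threads extent_t objects end-to-end instead of unpacked (addr, size, zeroed, committed) tuples, and chunk_register()/chunk_deregister() become static to chunk.c. A minimal before/after sketch of a chunk_dalloc_wrapper() call site, condensed from the diffs below (illustrative only):

/* Before: the caller unpacked the extent and released it separately. */
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, extent_addr_get(extent),
    extent_size_get(extent), extent_zeroed_get(extent),
    extent_committed_get(extent));
arena_extent_dalloc(tsdn, arena, extent);

/* After: the extent travels through the call, and chunk.c deregisters
 * and recycles it internally. */
chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, extent);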

include/jemalloc/internal/arena.h

@@ -478,17 +478,14 @@ extent_t *arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
     bool *zero);
 void arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
+    chunk_hooks_t *chunk_hooks, extent_t *extent);
 void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
     bool cache);
 void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
     bool cache);
-extent_t *arena_extent_alloc(tsdn_t *tsdn, arena_t *arena);
-void arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
 extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
-void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t usize);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
 void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,

include/jemalloc/internal/chunk.h

@@ -52,9 +52,6 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
 chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
     const chunk_hooks_t *chunk_hooks);
-bool chunk_register(tsdn_t *tsdn, const extent_t *extent);
-void chunk_deregister(tsdn_t *tsdn, const extent_t *extent);
-void chunk_reregister(tsdn_t *tsdn, const extent_t *extent);
 extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
     bool *zero);
@@ -62,10 +59,9 @@ extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
     bool *zero, bool *commit);
 void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
+    chunk_hooks_t *chunk_hooks, extent_t *extent);
 void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
-    bool committed);
+    chunk_hooks_t *chunk_hooks, extent_t *extent);
 bool chunk_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);

include/jemalloc/internal/extent.h

@@ -62,6 +62,9 @@ typedef ph(extent_t) extent_heap_t;
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
+extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
+void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
+
 #ifdef JEMALLOC_JET
 typedef size_t (extent_size_quantize_t)(size_t);
 extern extent_size_quantize_t *extent_size_quantize_floor;

include/jemalloc/internal/private_symbols.txt

@@ -37,8 +37,6 @@ arena_decay_time_get
 arena_decay_time_set
 arena_dss_prec_get
 arena_dss_prec_set
-arena_extent_alloc
-arena_extent_dalloc
 arena_get
 arena_ichoose
 arena_init
@@ -166,7 +164,6 @@ chunk_dalloc_cache
 chunk_dalloc_mmap
 chunk_dalloc_wrapper
 chunk_decommit_wrapper
-chunk_deregister
 chunk_dss_boot
 chunk_dss_postfork_child
 chunk_dss_postfork_parent
@@ -184,8 +181,6 @@ chunk_postfork_child
 chunk_postfork_parent
 chunk_prefork
 chunk_purge_wrapper
-chunk_register
-chunk_reregister
 chunk_split_wrapper
 chunks_rtree
 chunksize
@@ -214,10 +209,12 @@ extent_active_get
 extent_active_set
 extent_addr_get
 extent_addr_set
+extent_alloc
 extent_arena_get
 extent_arena_set
 extent_committed_get
 extent_committed_set
+extent_dalloc
 extent_dirty_insert
 extent_dirty_remove
 extent_init

src/arena.c

@@ -249,23 +249,22 @@ arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
 
 static void
 arena_chunk_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed)
+    chunk_hooks_t *chunk_hooks, extent_t *extent)
 {
 
     malloc_mutex_assert_owner(tsdn, &arena->lock);
 
-    chunk_dalloc_cache(tsdn, arena, chunk_hooks, chunk, size, committed);
+    chunk_dalloc_cache(tsdn, arena, chunk_hooks, extent);
     arena_maybe_purge(tsdn, arena);
 }
 
 void
 arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed)
+    chunk_hooks_t *chunk_hooks, extent_t *extent)
 {
 
     malloc_mutex_lock(tsdn, &arena->lock);
-    arena_chunk_cache_dalloc_locked(tsdn, arena, chunk_hooks, chunk, size,
-        committed);
+    arena_chunk_cache_dalloc_locked(tsdn, arena, chunk_hooks, extent);
     malloc_mutex_unlock(tsdn, &arena->lock);
 }
@@ -582,33 +581,14 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
         if (chunk_commit_wrapper(tsdn, arena, chunk_hooks,
             extent_addr_get(extent), extent_size_get(extent), 0,
             map_bias << LG_PAGE)) {
-            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-                extent_addr_get(extent), extent_size_get(extent),
-                extent_zeroed_get(extent),
-                extent_committed_get(extent));
+            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, extent);
             extent = NULL;
         }
     }
 
-    if (extent != NULL) {
+    if (extent != NULL)
         extent_slab_set(extent, true);
-        if (chunk_register(tsdn, extent)) {
-            if (!*commit) {
-                /* Undo commit of header. */
-                chunk_decommit_wrapper(tsdn, arena, chunk_hooks,
-                    extent_addr_get(extent),
-                    extent_size_get(extent), 0, map_bias <<
-                    LG_PAGE);
-            }
-            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-                extent_addr_get(extent), extent_size_get(extent),
-                extent_zeroed_get(extent),
-                extent_committed_get(extent));
-            extent = NULL;
-        }
-    }
 
     malloc_mutex_lock(tsdn, &arena->lock);
     return (extent);
@@ -625,13 +605,6 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
         chunksize, chunksize, zero);
     if (extent != NULL) {
         extent_slab_set(extent, true);
-        if (chunk_register(tsdn, extent)) {
-            arena_chunk_cache_dalloc_locked(tsdn, arena,
-                &chunk_hooks, extent_addr_get(extent),
-                extent_size_get(extent), true);
-            return (NULL);
-        }
         *commit = true;
     }
     if (extent == NULL) {
@@ -722,14 +695,13 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
 
 static void
 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
 {
-    bool committed;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
-    chunk_deregister(tsdn, extent);
-    committed = (arena_mapbits_decommitted_get((arena_chunk_t *)
-        extent_addr_get(extent), map_bias) == 0);
-    if (!committed) {
+    extent_committed_set(extent,
+        (arena_mapbits_decommitted_get((arena_chunk_t *)
+        extent_addr_get(extent), map_bias) == 0));
+    extent_slab_set(extent, false);
+    if (!extent_committed_get(extent)) {
         /*
          * Decommit the header. Mark the chunk as decommitted even if
          * header decommit fails, since treating a partially committed
@@ -741,15 +713,12 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
             map_bias << LG_PAGE);
     }
 
-    arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks,
-        extent_addr_get(extent), extent_size_get(extent), committed);
-
     if (config_stats) {
         arena->stats.mapped -= extent_size_get(extent);
         arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
     }
-    arena_extent_dalloc(tsdn, arena, extent);
+
+    arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
 }
@@ -852,32 +821,6 @@ arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
     arena_huge_malloc_stats_update(arena, usize);
 }
 
-extent_t *
-arena_extent_alloc(tsdn_t *tsdn, arena_t *arena)
-{
-    extent_t *extent;
-
-    malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
-    extent = ql_last(&arena->extent_cache, ql_link);
-    if (extent == NULL) {
-        malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
-        return (base_alloc(tsdn, sizeof(extent_t)));
-    }
-    ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
-    malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
-    return (extent);
-}
-
-void
-arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
-{
-
-    malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
-    ql_elm_new(extent, ql_link);
-    ql_tail_insert(&arena->extent_cache, extent, ql_link);
-    malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
-}
-
 static extent_t *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
@@ -931,21 +874,21 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 }
 
 void
-arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
 {
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-    size_t csize;
 
-    csize = CHUNK_CEILING(usize);
     malloc_mutex_lock(tsdn, &arena->lock);
     if (config_stats) {
-        arena_huge_dalloc_stats_update(arena, usize);
-        arena->stats.mapped -= usize;
+        arena_huge_dalloc_stats_update(arena, extent_size_get(extent));
+        arena->stats.mapped -= extent_size_get(extent);
     }
-    arena_nactive_sub(arena, usize >> LG_PAGE);
+    arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
 
-    arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, chunk, csize,
-        true);
+    if ((extent_size_get(extent) & chunksize_mask) != 0)
+        extent_size_set(extent, CHUNK_CEILING(extent_size_get(extent)));
+    arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
     malloc_mutex_unlock(tsdn, &arena->lock);
 }
@@ -1656,15 +1599,10 @@ arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         rdelm_next = qr_next(rdelm, rd_link);
         if (rdelm == &chunkselm->rd) {
             extent_t *chunkselm_next = qr_next(chunkselm, cc_link);
-            void *addr = extent_addr_get(chunkselm);
-            size_t size = extent_size_get(chunkselm);
-            bool zeroed = extent_zeroed_get(chunkselm);
-            bool committed = extent_committed_get(chunkselm);
             extent_dirty_remove(chunkselm);
-            arena_extent_dalloc(tsdn, arena, chunkselm);
+            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+                chunkselm);
             chunkselm = chunkselm_next;
-            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
-                size, zeroed, committed);
         } else {
             extent_t *extent = iealloc(tsdn, rdelm);
             arena_chunk_t *chunk =

src/chunk.c

@@ -51,7 +51,7 @@ const chunk_hooks_t chunk_hooks_default = {
 
 static void chunk_record(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_heap_t extent_heaps[NPSIZES], bool cache,
-    void *chunk, size_t size, bool zeroed, bool committed);
+    extent_t *extent);
 
 /******************************************************************************/
@@ -203,7 +203,7 @@ extent_rtree_release(tsdn_t *tsdn, rtree_elm_t *elm_a, rtree_elm_t *elm_b)
     rtree_elm_release(tsdn, &chunks_rtree, elm_b);
 }
 
-bool
+static bool
 chunk_register(tsdn_t *tsdn, const extent_t *extent)
 {
     rtree_elm_t *elm_a, *elm_b;
@@ -232,7 +232,7 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
     return (false);
 }
 
-void
+static void
 chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
 {
     rtree_elm_t *elm_a, *elm_b;
@@ -249,15 +249,6 @@ chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
     }
 }
 
-void
-chunk_reregister(tsdn_t *tsdn, const extent_t *extent)
-{
-    bool err;
-
-    err = chunk_register(tsdn, extent);
-    assert(!err);
-}
-
 /*
  * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
  * fits.
@@ -282,7 +273,7 @@ chunk_first_best_fit(arena_t *arena, extent_heap_t extent_heaps[NPSIZES],
 
 static void
 chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
-    void *addr, size_t size)
+    extent_t *extent)
 {
 
     /*
@@ -290,9 +281,11 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
      * that this is only a virtual memory leak.
      */
     if (cache) {
-        chunk_purge_wrapper(tsdn, arena, chunk_hooks, addr, size, 0,
-            size);
+        chunk_purge_wrapper(tsdn, arena, chunk_hooks,
+            extent_addr_get(extent), extent_size_get(extent), 0,
+            extent_size_get(extent));
     }
+    extent_dalloc(tsdn, arena, extent);
 }
@@ -351,9 +344,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         extent = chunk_split_wrapper(tsdn, arena, chunk_hooks, lead,
             leadsize, size + trailsize);
         if (extent == NULL) {
-            chunk_leak(tsdn, arena, chunk_hooks, cache,
-                extent_addr_get(lead), extent_size_get(lead));
-            arena_extent_dalloc(tsdn, arena, lead);
+            chunk_leak(tsdn, arena, chunk_hooks, cache, lead);
             malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
             return (NULL);
         }
@@ -366,9 +357,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         extent_t *trail = chunk_split_wrapper(tsdn, arena, chunk_hooks,
             extent, size, trailsize);
         if (trail == NULL) {
-            chunk_leak(tsdn, arena, chunk_hooks, cache,
-                extent_addr_get(extent), extent_size_get(extent));
-            arena_extent_dalloc(tsdn, arena, extent);
+            chunk_leak(tsdn, arena, chunk_hooks, cache, extent);
             malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
             return (NULL);
         }
@@ -381,9 +370,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         extent_size_get(extent), 0, extent_size_get(extent), arena->ind)) {
         malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
         chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache,
-            extent_addr_get(extent), extent_size_get(extent),
-            extent_zeroed_get(extent), extent_committed_get(extent));
-        arena_extent_dalloc(tsdn, arena, extent);
+            extent);
         return (NULL);
     }
@@ -529,7 +516,7 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     if (extent == NULL) {
         void *chunk;
 
-        extent = arena_extent_alloc(tsdn, arena);
+        extent = extent_alloc(tsdn, arena);
         if (extent == NULL)
             return (NULL);
         chunk = chunk_hooks->alloc(new_addr, size, alignment,
@@ -540,6 +527,11 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             false);
     }
 
+    if (chunk_register(tsdn, extent)) {
+        chunk_leak(tsdn, arena, chunk_hooks, false, extent);
+        return (NULL);
+    }
+
     return (extent);
 }
@@ -590,29 +582,21 @@ chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
 static void
 chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    extent_heap_t extent_heaps[NPSIZES], bool cache, void *chunk, size_t size,
-    bool zeroed, bool committed)
+    extent_heap_t extent_heaps[NPSIZES], bool cache, extent_t *extent)
 {
-    extent_t *extent, *prev, *next;
+    extent_t *prev, *next;
 
-    assert(!cache || !zeroed);
+    assert(!cache || !extent_zeroed_get(extent));
 
     malloc_mutex_lock(tsdn, &arena->chunks_mtx);
     chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 
-    /* Create/initialize/insert extent. */
-    extent = arena_extent_alloc(tsdn, arena);
-    if (extent == NULL) {
-        chunk_leak(tsdn, arena, chunk_hooks, cache, chunk, size);
-        goto label_return;
-    }
-    extent_init(extent, arena, chunk, size, false, !cache && zeroed,
-        committed, false);
-    if (chunk_register(tsdn, extent)) {
-        arena_extent_dalloc(tsdn, arena, extent);
-        chunk_leak(tsdn, arena, chunk_hooks, cache, chunk, size);
-        goto label_return;
-    }
+    assert((extent_size_get(extent) & chunksize_mask) == 0);
+    extent_active_set(extent, false);
+    extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
+    extent_slab_set(extent, false);
+    assert(chunk_lookup(tsdn, extent_addr_get(extent), true) == extent);
     extent_heaps_insert(extent_heaps, extent);
     arena_chunk_cache_maybe_insert(arena, extent, cache);
@@ -632,22 +616,24 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             extent_heaps, cache);
     }
 
-label_return:
     malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
 }
 
 void
 chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool committed)
+    extent_t *extent)
 {
 
-    assert(chunk != NULL);
-    assert(CHUNK_ADDR2BASE(chunk) == chunk);
-    assert(size != 0);
-    assert((size & chunksize_mask) == 0);
+    assert(extent_addr_get(extent) != NULL);
+    assert(CHUNK_ADDR2BASE(extent_addr_get(extent)) ==
+        extent_addr_get(extent));
+    assert(extent_size_get(extent) != 0);
+    assert((extent_size_get(extent) & chunksize_mask) == 0);
 
+    extent_zeroed_set(extent, false);
     chunk_record(tsdn, arena, chunk_hooks, arena->chunks_cached, true,
-        chunk, size, false, committed);
+        extent);
 }
 
 static bool
@@ -662,30 +648,40 @@ chunk_dalloc_default(void *chunk, size_t size, bool committed,
 
 void
 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *chunk, size_t size, bool zeroed, bool committed)
+    extent_t *extent)
 {
 
-    assert(chunk != NULL);
-    assert(CHUNK_ADDR2BASE(chunk) == chunk);
-    assert(size != 0);
-    assert((size & chunksize_mask) == 0);
+    assert(extent_addr_get(extent) != NULL);
+    assert(CHUNK_ADDR2BASE(extent_addr_get(extent)) ==
+        extent_addr_get(extent));
+    assert(extent_size_get(extent) != 0);
+    assert((extent_size_get(extent) & chunksize_mask) == 0);
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     /* Try to deallocate. */
-    if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+    if (!chunk_hooks->dalloc(extent_addr_get(extent),
+        extent_size_get(extent), extent_committed_get(extent),
+        arena->ind)) {
+        chunk_deregister(tsdn, extent);
+        extent_dalloc(tsdn, arena, extent);
         return;
-    /* Try to decommit; purge if that fails. */
-    if (committed) {
-        committed = chunk_hooks->decommit(chunk, size, 0, size,
-            arena->ind);
-    }
-    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
-        arena->ind);
-    chunk_record(tsdn, arena, chunk_hooks, arena->chunks_retained, false,
-        chunk, size, zeroed, committed);
+    }
+    /* Try to decommit; purge if that fails. */
+    if (extent_committed_get(extent)) {
+        extent_committed_set(extent,
+            chunk_hooks->decommit(extent_addr_get(extent),
+            extent_size_get(extent), 0, extent_size_get(extent),
+            arena->ind));
+    }
+    extent_zeroed_set(extent, !extent_committed_get(extent) ||
+        !chunk_hooks->purge(extent_addr_get(extent),
+        extent_size_get(extent), 0, extent_size_get(extent), arena->ind));
 
     if (config_stats)
-        arena->stats.retained += size;
+        arena->stats.retained += extent_size_get(extent);
+
+    chunk_record(tsdn, arena, chunk_hooks, arena->chunks_retained, false,
+        extent);
 }
@@ -771,7 +767,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 
-    trail = arena_extent_alloc(tsdn, arena);
+    trail = extent_alloc(tsdn, arena);
     if (trail == NULL)
         goto label_error_a;
@@ -814,7 +810,7 @@ label_error_d:
 label_error_c:
     extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
 label_error_b:
-    arena_extent_dalloc(tsdn, arena, trail);
+    extent_dalloc(tsdn, arena, trail);
 label_error_a:
     return (NULL);
 }
@@ -841,6 +837,9 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 {
     rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
 
+    assert((extent_size_get(a) & chunksize_mask) == 0);
+    assert((extent_size_get(b) & chunksize_mask) == 0);
+
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
         extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
@@ -871,7 +870,7 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
     extent_rtree_release(tsdn, a_elm_a, b_elm_b);
 
-    arena_extent_dalloc(tsdn, extent_arena_get(b), b);
+    extent_dalloc(tsdn, extent_arena_get(b), b);
 
     return (false);
 }

src/chunk_dss.c

@@ -89,7 +89,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
      * malloc.
      */
     do {
-        void *ret, *cpad, *dss_next;
+        void *ret, *cpad_addr, *dss_next;
+        extent_t *cpad;
         size_t gap_size, cpad_size;
         intptr_t incr;
         /* Avoid an unnecessary system call. */
@@ -114,10 +115,19 @@
             * necessary to satisfy alignment. This space can be
             * recycled for later use.
             */
-            cpad = (void *)((uintptr_t)dss_max + gap_size);
+            cpad_addr = (void *)((uintptr_t)dss_max + gap_size);
             ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                 alignment);
-            cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+            cpad_size = (uintptr_t)ret - (uintptr_t)cpad_addr;
+            if (cpad_size != 0) {
+                cpad = extent_alloc(tsdn, arena);
+                if (cpad == NULL) {
+                    malloc_mutex_unlock(tsdn, &dss_mtx);
+                    return (NULL);
+                }
+                extent_init(cpad, arena, cpad_addr, cpad_size,
+                    false, false, true, false);
+            }
             dss_next = (void *)((uintptr_t)ret + size);
             if ((uintptr_t)ret < (uintptr_t)dss_max ||
                 (uintptr_t)dss_next < (uintptr_t)dss_max) {
@@ -135,8 +145,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
                 chunk_hooks_t chunk_hooks =
                     CHUNK_HOOKS_INITIALIZER;
                 chunk_dalloc_wrapper(tsdn, arena,
-                    &chunk_hooks, cpad, cpad_size,
-                    false, true);
+                    &chunk_hooks, cpad);
             }
             if (*zero)
                 memset(ret, 0, size);

src/chunk_mmap.c

@@ -73,6 +73,5 @@ chunk_dalloc_mmap(void *chunk, size_t size)
 
     if (config_munmap)
         pages_unmap(chunk, size);
-
     return (!config_munmap);
 }

src/extent.c

@@ -3,6 +3,32 @@
 
 /******************************************************************************/
 
+extent_t *
+extent_alloc(tsdn_t *tsdn, arena_t *arena)
+{
+    extent_t *extent;
+
+    malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
+    extent = ql_last(&arena->extent_cache, ql_link);
+    if (extent == NULL) {
+        malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
+        return (base_alloc(tsdn, sizeof(extent_t)));
+    }
+    ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
+    malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
+    return (extent);
+}
+
+void
+extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
+{
+
+    malloc_mutex_lock(tsdn, &arena->extent_cache_mtx);
+    ql_elm_new(extent, ql_link);
+    ql_tail_insert(&arena->extent_cache, extent, ql_link);
+    malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
+}
+
 #ifdef JEMALLOC_JET
 #undef extent_size_quantize_floor
 #define extent_size_quantize_floor JEMALLOC_N(n_extent_size_quantize_floor)

src/huge.c

@@ -43,13 +43,6 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     if (usize < extent_size_get(extent))
         extent_size_set(extent, usize);
 
-    if (chunk_register(tsdn, extent)) {
-        arena_chunk_dalloc_huge(tsdn, arena, extent_addr_get(extent),
-            usize);
-        arena_extent_dalloc(tsdn, arena, extent);
-        return (NULL);
-    }
-
     /* Insert extent into huge. */
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     ql_elm_new(extent, ql_link);
@@ -57,10 +50,14 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
     if (zero || (config_fill && unlikely(opt_zero))) {
-        if (!is_zeroed)
-            memset(extent_addr_get(extent), 0, usize);
-    } else if (config_fill && unlikely(opt_junk_alloc))
-        memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, usize);
+        if (!is_zeroed) {
+            memset(extent_addr_get(extent), 0,
+                extent_size_get(extent));
+        }
+    } else if (config_fill && unlikely(opt_junk_alloc)) {
+        memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
+            extent_size_get(extent));
+    }
 
     arena_decay_tick(tsdn, arena);
     return (extent_addr_get(extent));
@@ -126,11 +123,9 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
     /* Update the size of the huge allocation. */
     assert(extent_size_get(extent) != usize);
-    chunk_deregister(tsdn, extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     extent_size_set(extent, usize);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-    chunk_reregister(tsdn, extent);
 
     /* Update zeroed. */
     extent_zeroed_set(extent, post_zeroed);
@@ -174,11 +169,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
                 extent_size_get(trail));
         }
 
-        arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks,
-            extent_addr_get(trail), extent_size_get(trail),
-            extent_committed_get(trail));
-        arena_extent_dalloc(tsdn, arena, trail);
+        arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks, trail);
     }
 
     /* Optionally fill trailing subchunk. */
@@ -233,10 +224,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
     }
 
     if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
-        arena_extent_dalloc(tsdn, arena, trail);
-        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks,
-            extent_addr_get(trail), extent_size_get(trail),
-            extent_zeroed_get(trail), extent_committed_get(trail));
+        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, trail);
         return (true);
     }
@@ -362,16 +350,13 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent)
     arena_t *arena;
 
     arena = extent_arena_get(extent);
-    chunk_deregister(tsdn, extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     ql_remove(&arena->huge, extent, ql_link);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
     huge_dalloc_junk(tsdn, extent_addr_get(extent),
         extent_size_get(extent));
-    arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
-        extent_addr_get(extent), extent_size_get(extent));
-    arena_extent_dalloc(tsdn, arena, extent);
+    arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent), extent);
 
     arena_decay_tick(tsdn, arena);
 }