Add/use chunk_merge_wrapper().

Jason Evans 2016-05-18 10:32:05 -07:00
parent 384e88f451
commit 1ad060584f
6 changed files with 102 additions and 94 deletions


@@ -496,7 +496,7 @@ void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
     void *chunk, size_t oldsize, size_t usize);
 bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize, bool *zero);
+    extent_t *extent, size_t usize);
 ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
 bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t lg_dirty_mult);


@@ -75,6 +75,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
 bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);
+bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
 bool chunk_boot(void);
 void chunk_prefork(tsdn_t *tsdn);
 void chunk_postfork_parent(tsdn_t *tsdn);
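
The prototype added above is the whole surface of the new helper: it takes the arena's chunk hooks plus two extents, and reports failure with true and success with false, like the other chunk_*_wrapper functions. As a rough illustration of the pattern (consult a user-overridable merge hook first, then fold the second extent's metadata into the first), here is a minimal, self-contained C sketch. It is not jemalloc code; demo_extent_t, demo_merge_hook_t, demo_merge_default, and demo_merge_wrapper are invented stand-ins for extent_t, the chunk_hooks_t merge member, chunk_merge_default(), and chunk_merge_wrapper().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    void   *addr;
    size_t  size;
    bool    zeroed;
    bool    committed;
} demo_extent_t;

/* Same shape as a chunk merge hook: return true to veto the merge. */
typedef bool (*demo_merge_hook_t)(void *addr_a, size_t size_a, void *addr_b,
    size_t size_b, bool committed);

static bool
demo_merge_default(void *addr_a, size_t size_a, void *addr_b, size_t size_b,
    bool committed)
{
    (void)size_b;
    (void)committed;
    /* Veto unless the two regions are virtually contiguous. */
    return ((uintptr_t)addr_a + size_a != (uintptr_t)addr_b);
}

/* Returns true on failure, false on success. */
static bool
demo_merge_wrapper(demo_merge_hook_t merge, demo_extent_t *a, demo_extent_t *b)
{
    if (merge(a->addr, a->size, b->addr, b->size, a->committed))
        return (true);
    /* The hook accepted: fold b's metadata into a; b is now dead. */
    a->size += b->size;
    a->zeroed = a->zeroed && b->zeroed;
    return (false);
}

int
main(void)
{
    static char buf[8192];
    demo_extent_t a = {buf, 4096, true, true};
    demo_extent_t b = {buf + 4096, 4096, false, true};

    if (!demo_merge_wrapper(demo_merge_default, &a, &b))
        printf("merged: size=%zu zeroed=%d\n", a.size, (int)a.zeroed);
    return (0);
}

The true-on-failure convention keeps both extents valid when the hook vetoes the merge, which is what lets callers such as chunk_try_coalesce() simply reinsert them and move on.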


@@ -176,6 +176,7 @@ chunk_hooks_get
 chunk_hooks_set
 chunk_in_dss
 chunk_lookup
+chunk_merge_wrapper
 chunk_npages
 chunk_postfork_child
 chunk_postfork_parent


@@ -948,69 +948,71 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
     malloc_mutex_unlock(tsdn, &arena->lock);
 }
 
-static bool
-arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
-    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
-{
-    bool err;
-    bool commit = true;
-
-    err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-        chunksize, zero, &commit) == NULL);
-    if (err) {
-        /* Revert optimistic stats updates. */
-        malloc_mutex_lock(tsdn, &arena->lock);
-        if (config_stats) {
-            arena_huge_ralloc_stats_update_undo(arena, oldsize,
-                usize);
-            arena->stats.mapped -= cdiff;
-        }
-        arena_nactive_sub(arena, udiff >> LG_PAGE);
-        malloc_mutex_unlock(tsdn, &arena->lock);
-    } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
-        cdiff, true, arena->ind)) {
-        chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
-            *zero, true);
-        err = true;
-    }
-
-    return (err);
-}
-
 bool
-arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize, bool *zero)
+arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    size_t usize)
 {
     bool err;
+    bool zero = false;
     chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-    void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
-    size_t udiff = usize - oldsize;
-    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+    void *nchunk =
+        (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
+    size_t udiff = usize - extent_size_get(extent);
+    size_t cdiff = CHUNK_CEILING(usize) -
+        CHUNK_CEILING(extent_size_get(extent));
+    extent_t *trail;
 
     malloc_mutex_lock(tsdn, &arena->lock);
 
     /* Optimistically update stats. */
     if (config_stats) {
-        arena_huge_ralloc_stats_update(arena, oldsize, usize);
+        arena_huge_ralloc_stats_update(arena, extent_size_get(extent),
+            usize);
         arena->stats.mapped += cdiff;
     }
     arena_nactive_add(arena, udiff >> LG_PAGE);
 
     err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-        chunksize, zero, true) == NULL);
+        chunksize, &zero, true) == NULL);
     malloc_mutex_unlock(tsdn, &arena->lock);
     if (err) {
-        err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
-            &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
-            cdiff);
-    } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
-        cdiff, true, arena->ind)) {
-        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-            *zero, true);
-        err = true;
+        bool commit = true;
+
+        if (chunk_alloc_wrapper(tsdn, arena, &chunk_hooks, nchunk,
+            cdiff, chunksize, &zero, &commit) == NULL)
+            goto label_revert;
     }
 
-    return (err);
+    trail = arena_extent_alloc(tsdn, arena);
+    if (trail == NULL) {
+        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+            zero, true);
+        goto label_revert;
+    }
+    extent_init(trail, arena, nchunk, cdiff, true, zero, true, false);
+    if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
+        arena_extent_dalloc(tsdn, arena, trail);
+        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+            zero, true);
+        goto label_revert;
+    }
+
+    if (usize < extent_size_get(extent))
+        extent_size_set(extent, usize);
+    return (false);
+label_revert:
+    /* Revert optimistic stats updates. */
+    malloc_mutex_lock(tsdn, &arena->lock);
+    if (config_stats) {
+        arena_huge_ralloc_stats_update_undo(arena,
+            extent_size_get(extent), usize);
+        arena->stats.mapped -= cdiff;
+    }
+    arena_nactive_sub(arena, udiff >> LG_PAGE);
+    malloc_mutex_unlock(tsdn, &arena->lock);
+    return (true);
 }
 
 /*
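
The rewritten arena_chunk_ralloc_huge_expand() above drops the separate _hard() helper in favor of a single function whose failure paths all funnel through label_revert, undoing the stats and nactive counters that were bumped optimistically before the allocation was attempted. Below is a minimal, self-contained sketch of that control-flow shape only; it is not jemalloc code, and demo_stats_t, backing_alloc(), and try_grow() are invented stand-ins.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    size_t mapped;
    size_t active;
} demo_stats_t;

/* Stand-in for the backing allocation step; pretend huge requests fail. */
static bool
backing_alloc(size_t size)
{
    return (size <= ((size_t)1 << 30));
}

/* Returns true on failure, false on success. */
static bool
try_grow(demo_stats_t *stats, size_t oldsize, size_t newsize)
{
    size_t diff = newsize - oldsize;

    /* Optimistically update stats. */
    stats->mapped += diff;
    stats->active += diff;

    if (!backing_alloc(diff))
        goto label_revert;
    /* Later steps (metadata allocation, merge) would also jump on error. */
    return (false);
label_revert:
    /* Revert optimistic stats updates. */
    stats->mapped -= diff;
    stats->active -= diff;
    return (true);
}

int
main(void)
{
    demo_stats_t stats = {0, 0};

    if (try_grow(&stats, 1 << 20, 1 << 22))
        printf("grow failed; stats reverted to %zu/%zu\n", stats.mapped,
            stats.active);
    else
        printf("grew; stats now %zu/%zu\n", stats.mapped, stats.active);
    return (0);
}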


@@ -586,51 +586,26 @@ static void
 chunk_try_coalesce(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *a, extent_t *b, extent_heap_t extent_heaps[NPSIZES], bool cache)
 {
-    rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
-
     if (!chunk_can_coalesce(a, b))
         return;
 
-    if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
-        extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
-        arena->ind))
-        return;
-
-    /*
-     * The rtree writes must happen while all the relevant elements are
-     * owned, so the following code uses decomposed helper functions rather
-     * than chunk_{,de}register() to do things in the right order.
-     */
-    extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
-    extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
-
-    if (a_elm_b != NULL) {
-        rtree_elm_write_acquired(tsdn, &chunks_rtree, a_elm_b, NULL);
-        rtree_elm_release(tsdn, &chunks_rtree, a_elm_b);
-    }
-    if (b_elm_b != NULL) {
-        rtree_elm_write_acquired(tsdn, &chunks_rtree, b_elm_a, NULL);
-        rtree_elm_release(tsdn, &chunks_rtree, b_elm_a);
-    } else
-        b_elm_b = b_elm_a;
-
     extent_heaps_remove(extent_heaps, a);
     extent_heaps_remove(extent_heaps, b);
     arena_chunk_cache_maybe_remove(extent_arena_get(a), a, cache);
     arena_chunk_cache_maybe_remove(extent_arena_get(b), b, cache);
 
-    extent_size_set(a, extent_size_get(a) + extent_size_get(b));
-    extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+    if (chunk_merge_wrapper(tsdn, arena, chunk_hooks, a, b)) {
+        extent_heaps_insert(extent_heaps, a);
+        extent_heaps_insert(extent_heaps, b);
+        arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
+        arena_chunk_cache_maybe_insert(extent_arena_get(b), b, cache);
+        return;
+    }
 
     extent_heaps_insert(extent_heaps, a);
-    extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
-    extent_rtree_release(tsdn, a_elm_a, b_elm_b);
     arena_chunk_cache_maybe_insert(extent_arena_get(a), a, cache);
-
-    arena_extent_dalloc(tsdn, extent_arena_get(b), b);
 }
 
 static void
@@ -820,6 +795,46 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
     return (false);
 }
 
+bool
+chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_t *a, extent_t *b)
+{
+    rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+
+    if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
+        extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
+        arena->ind))
+        return (true);
+
+    /*
+     * The rtree writes must happen while all the relevant elements are
+     * owned, so the following code uses decomposed helper functions rather
+     * than chunk_{,de}register() to do things in the right order.
+     */
+    extent_rtree_acquire(tsdn, a, true, false, &a_elm_a, &a_elm_b);
+    extent_rtree_acquire(tsdn, b, true, false, &b_elm_a, &b_elm_b);
+
+    if (a_elm_b != NULL) {
+        rtree_elm_write_acquired(tsdn, &chunks_rtree, a_elm_b, NULL);
+        rtree_elm_release(tsdn, &chunks_rtree, a_elm_b);
+    }
+    if (b_elm_b != NULL) {
+        rtree_elm_write_acquired(tsdn, &chunks_rtree, b_elm_a, NULL);
+        rtree_elm_release(tsdn, &chunks_rtree, b_elm_a);
+    } else
+        b_elm_b = b_elm_a;
+
+    extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+    extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+    extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a);
+    extent_rtree_release(tsdn, a_elm_a, b_elm_b);
+
+    arena_extent_dalloc(tsdn, extent_arena_get(b), b);
+
+    return (false);
+}
+
 bool
 chunk_boot(void)
 {
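
chunk_merge_wrapper() itself has to keep the chunks_rtree consistent: an extent is looked up under its first and its last chunk address, so a successful merge clears the two interior boundary entries (when they exist as separate elements) and leaves the surviving head/tail pair pointing at a before b's extent_t is discarded. The following self-contained sketch (not jemalloc code; the flat registry array, demo_extent_t, and the demo_* functions are invented stand-ins for the rtree and its elements) illustrates just that bookkeeping:

#include <stddef.h>
#include <stdio.h>

#define NCHUNKS 16

typedef struct {
    size_t first;   /* Index of the first chunk covered by the extent. */
    size_t last;    /* Index of the last chunk covered by the extent. */
} demo_extent_t;

/* Stand-in for the rtree: chunk index -> owning extent, boundaries only. */
static demo_extent_t *registry[NCHUNKS];

static void
demo_register(demo_extent_t *e)
{
    registry[e->first] = e;
    registry[e->last] = e;
}

/* Merge contiguous b into a while keeping the registry consistent. */
static void
demo_merge(demo_extent_t *a, demo_extent_t *b)
{
    /* Interior boundary entries go away (when they exist separately)... */
    if (a->last != a->first)
        registry[a->last] = NULL;
    if (b->first != b->last)
        registry[b->first] = NULL;
    /* ...and the surviving head/tail pair both point at a. */
    a->last = b->last;
    registry[a->first] = a;
    registry[a->last] = a;
}

int
main(void)
{
    demo_extent_t a = {0, 3}, b = {4, 7};

    demo_register(&a);
    demo_register(&b);
    demo_merge(&a, &b);
    printf("merged extent covers chunks %zu..%zu\n", a.first, a.last);
    return (0);
}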


@@ -215,31 +215,19 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
     size_t oldsize, size_t usize, bool zero)
 {
     arena_t *arena;
-    bool is_zeroed_subchunk, is_zeroed_chunk;
+    bool is_zeroed_subchunk;
 
     arena = extent_arena_get(extent);
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     is_zeroed_subchunk = extent_zeroed_get(extent);
     malloc_mutex_unlock(tsdn, &arena->huge_mtx);
 
-    /*
-     * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
-     * update extent's zeroed field, and zero as necessary.
-     */
-    is_zeroed_chunk = false;
-    if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
-        &is_zeroed_chunk))
+    if (arena_chunk_ralloc_huge_expand(tsdn, arena, extent, usize))
         return (true);
 
-    /* Update the size of the huge allocation. */
-    chunk_deregister(tsdn, extent);
-    malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    extent_size_set(extent, usize);
-    extent_zeroed_set(extent, extent_zeroed_get(extent) && is_zeroed_chunk);
-    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-    chunk_reregister(tsdn, extent);
-
     if (zero || (config_fill && unlikely(opt_zero))) {
+        bool is_zeroed_chunk = extent_zeroed_get(extent);
+
         if (!is_zeroed_subchunk) {
             memset((void *)((uintptr_t)ptr + oldsize), 0,
                 CHUNK_CEILING(oldsize) - oldsize);