Fix extent_alloc_cache[_locked]() to support decommitted allocation.

Fix extent_alloc_cache[_locked]() to support decommitted allocation, and
use this ability in arena_stash_dirty(), so that decommitted extents are
not needlessly committed during purging.  In practice this does not
happen on any currently supported systems, because both extent merging
and decommit must be implemented; all supported systems implement one
xor the other.
This commit is contained in:
Jason Evans 2016-11-03 17:25:54 -07:00
parent 4f7d8c2dee
commit 8dd5ea87ca
4 changed files with 19 additions and 20 deletions

View File

@@ -101,10 +101,10 @@ ph_proto(, extent_heap_, extent_heap_t, extent_t)
extent_t *extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab); size_t alignment, bool *zero, bool *commit, bool slab);
extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena, extent_t *extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab); size_t alignment, bool *zero, bool *commit, bool slab);
extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab); size_t alignment, bool *zero, bool *commit, bool slab);

View File

@@ -49,11 +49,12 @@ arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab) size_t alignment, bool *zero, bool slab)
{ {
bool commit = true;
malloc_mutex_assert_owner(tsdn, &arena->lock); malloc_mutex_assert_owner(tsdn, &arena->lock);
return (extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize, return (extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
pad, alignment, zero, slab)); pad, alignment, zero, &commit, slab));
} }
extent_t * extent_t *
@@ -681,7 +682,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
for (extent = qr_next(&arena->extents_dirty, qr_link); extent != for (extent = qr_next(&arena->extents_dirty, qr_link); extent !=
&arena->extents_dirty; extent = next) { &arena->extents_dirty; extent = next) {
size_t npages; size_t npages;
bool zero; bool zero, commit;
UNUSED extent_t *textent; UNUSED extent_t *textent;
npages = extent_size_get(extent) >> LG_PAGE; npages = extent_size_get(extent) >> LG_PAGE;
@@ -691,9 +692,10 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
next = qr_next(extent, qr_link); next = qr_next(extent, qr_link);
/* Allocate. */ /* Allocate. */
zero = false; zero = false;
commit = false;
textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks, textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0, PAGE, extent_base_get(extent), extent_size_get(extent), 0, PAGE,
&zero, false); &zero, &commit, false);
assert(textent == extent); assert(textent == extent);
assert(zero == extent_zeroed_get(extent)); assert(zero == extent_zeroed_get(extent));
extent_ring_remove(extent); extent_ring_remove(extent);
@@ -943,9 +945,8 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
extent_t *slab; extent_t *slab;
arena_slab_data_t *slab_data; arena_slab_data_t *slab_data;
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
bool zero; bool zero = false;
zero = false;
slab = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL, slab = arena_extent_cache_alloc_locked(tsdn, arena, &extent_hooks, NULL,
bin_info->slab_size, 0, PAGE, &zero, true); bin_info->slab_size, 0, PAGE, &zero, true);
if (slab == NULL) { if (slab == NULL) {

View File

@@ -517,8 +517,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
extent_usize_set(extent, usize); extent_usize_set(extent, usize);
} }
if (!extent_committed_get(extent) && extent_commit_wrapper(tsdn, arena, if (commit && !extent_committed_get(extent) &&
r_extent_hooks, extent, 0, extent_size_get(extent))) { extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent, 0,
extent_size_get(extent))) {
if (!locked) if (!locked)
malloc_mutex_unlock(tsdn, &arena->extents_mtx); malloc_mutex_unlock(tsdn, &arena->extents_mtx);
extent_record(tsdn, arena, r_extent_hooks, extent_heaps, cache, extent_record(tsdn, arena, r_extent_hooks, extent_heaps, cache,
@@ -590,44 +591,41 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
static extent_t * static extent_t *
extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena, extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize, extent_hooks_t **r_extent_hooks, bool locked, void *new_addr, size_t usize,
size_t pad, size_t alignment, bool *zero, bool slab) size_t pad, size_t alignment, bool *zero, bool *commit, bool slab)
{ {
extent_t *extent; extent_t *extent;
bool commit;
assert(usize + pad != 0); assert(usize + pad != 0);
assert(alignment != 0); assert(alignment != 0);
commit = true;
extent = extent_recycle(tsdn, arena, r_extent_hooks, extent = extent_recycle(tsdn, arena, r_extent_hooks,
arena->extents_cached, locked, true, new_addr, usize, pad, arena->extents_cached, locked, true, new_addr, usize, pad,
alignment, zero, &commit, slab); alignment, zero, commit, slab);
if (extent == NULL) if (extent == NULL)
return (NULL); return (NULL);
assert(commit);
return (extent); return (extent);
} }
extent_t * extent_t *
extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena, extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab) size_t alignment, bool *zero, bool *commit, bool slab)
{ {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx); malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true, return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
new_addr, usize, pad, alignment, zero, slab)); new_addr, usize, pad, alignment, zero, commit, slab));
} }
extent_t * extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena, extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad, extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool slab) size_t alignment, bool *zero, bool *commit, bool slab)
{ {
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false, return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
new_addr, usize, pad, alignment, zero, slab)); new_addr, usize, pad, alignment, zero, commit, slab));
} }
static void * static void *

View File

@@ -143,8 +143,8 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
extent_t *trail; extent_t *trail;
if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks, if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) extent_past_get(extent), trailsize, CACHELINE, &is_zeroed_trail)) ==
== NULL) { NULL) {
bool commit = true; bool commit = true;
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE, extent_past_get(extent), trailsize, 0, CACHELINE,