Trim before commit in extent_recycle().
This avoids creating clean committed pages as a side effect of aligned allocation. For configurations that decommit memory, purged pages are decommitted, and decommitted extents cannot be coalesced with committed extents. Unless those clean committed pages happen to be selected during a later allocation, they cause unnecessary, permanent extent fragmentation. This resolves #766.
This commit is contained in:
parent
acf4c8ae33
commit
fed9a880c8
@ -829,12 +829,16 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
|
||||
rtree_ctx_t rtree_ctx_fallback;
|
||||
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
|
||||
|
||||
bool committed = false;
|
||||
extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
|
||||
rtree_ctx, extents, false, new_addr, size, pad, alignment, slab,
|
||||
zero, commit);
|
||||
zero, &committed);
|
||||
if (extent == NULL) {
|
||||
return NULL;
|
||||
}
|
||||
if (committed) {
|
||||
*commit = true;
|
||||
}
|
||||
|
||||
extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
|
||||
extents, new_addr, size, pad, alignment, slab, szind, extent);
|
||||
@ -996,7 +1000,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
|
||||
assert(new_addr == NULL || leadsize == 0);
|
||||
assert(alloc_size >= leadsize + esize);
|
||||
size_t trailsize = alloc_size - leadsize - esize;
|
||||
if (extent_zeroed_get(extent)) {
|
||||
if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
|
||||
*zero = true;
|
||||
}
|
||||
if (extent_committed_get(extent)) {
|
||||
|
@ -15,7 +15,9 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
|
||||
return NULL;
|
||||
}
|
||||
assert(ret != NULL);
|
||||
*zero = true;
|
||||
if (*commit) {
|
||||
*zero = true;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user