Unify extent_alloc_wrapper with the other wrappers.

Previously, it was really more like extents_alloc (its primary allocation pathway
looked in an ecache for an extent to reuse).  Make that pathway explicitly
parallel to extents_alloc, and rename extent_alloc_wrapper_hard accordingly.
Author: David Goldblatt, 2019-12-12 17:30:28 -08:00 (committed by David Goldblatt)
parent d8b0b66c6c
commit ae23e5f426
4 changed files with 50 additions and 36 deletions
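
For orientation, here is a small, self-contained sketch of the allocation shape this
change settles on, as implied by the diff below: the grow-style pathway first tries to
reuse a retained extent and only falls back to mapping fresh memory when reuse fails,
giving up early when the caller is pinned to a specific address under retain.  The
types and helper names in the sketch (extent_t, alloc_retained, alloc_fresh,
alloc_grow) are simplified stand-ins, not the jemalloc internals shown in the diff.

/*
 * Sketch only: stand-in types and helpers, not the jemalloc API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct extent_s {
        size_t size;
} extent_t;

/* Stand-in for the extent_alloc_retained() step: reuse a retained extent. */
static extent_t *
alloc_retained(size_t size) {
        (void)size;
        return NULL;    /* Pretend the retained cache has nothing suitable. */
}

/* Stand-in for extent_alloc_wrapper() after this change: map fresh memory. */
static extent_t *
alloc_fresh(size_t size) {
        extent_t *e = malloc(sizeof(*e));
        if (e != NULL) {
                e->size = size;
        }
        return e;
}

/*
 * The extents_alloc_grow-style pathway: try reuse first, then grow by
 * allocating fresh memory as a fallback.
 */
static extent_t *
alloc_grow(size_t size, bool pinned_to_addr) {
        extent_t *e = alloc_retained(size);
        if (e == NULL) {
                if (pinned_to_addr) {
                        /*
                         * A fresh mapping is unlikely to land at a fixed
                         * address, so give up early; this mirrors the
                         * opt_retain && new_addr != NULL case in the diff.
                         */
                        return NULL;
                }
                e = alloc_fresh(size);
        }
        return e;
}

int
main(void) {
        extent_t *e = alloc_grow(4096, false);
        int ok = (e != NULL);
        free(e);
        return ok ? 0 : 1;
}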

File 1 of 4

@@ -29,10 +29,14 @@ extern rtree_t extents_rtree;
 edata_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
     bool slab, szind_t szind, bool *zero, bool *commit);
+edata_t *extents_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
+    bool slab, szind_t szind, bool *zero, bool *commit);
 void extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata);
 edata_t *extents_evict(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, size_t npages_min);
 edata_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
     szind_t szind, bool *zero, bool *commit);

File 2 of 4

@@ -444,8 +444,9 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
         }
         size_t size = usize + sz_large_pad;
         if (edata == NULL) {
-                edata = extent_alloc_wrapper(tsdn, arena, ehooks, NULL, usize,
-                    sz_large_pad, alignment, false, szind, zero, &commit);
+                edata = extents_alloc_grow(tsdn, arena, ehooks,
+                    &arena->ecache_retained, NULL, usize, sz_large_pad,
+                    alignment, false, szind, zero, &commit);
                 if (config_stats) {
                         /*
                          * edata may be NULL on OOM, but in that case mapped_add
@@ -1210,8 +1211,8 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         zero = false;
         commit = true;
-        slab = extent_alloc_wrapper(tsdn, arena, ehooks, NULL,
-            bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
+        slab = extents_alloc_grow(tsdn, arena, ehooks, &arena->ecache_retained,
+            NULL, bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
         if (config_stats && slab != NULL) {
                 arena_stats_mapped_add(tsdn, &arena->stats,

File 3 of 4

@@ -52,6 +52,9 @@ static edata_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
     bool *coalesced, bool growing_retained);
 static void extent_record(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     ecache_t *ecache, edata_t *edata, bool growing_retained);
+static edata_t *extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
+    ehooks_t *ehooks, void *new_addr, size_t size, size_t pad, size_t alignment,
+    bool slab, szind_t szind, bool *zero, bool *commit);
 
 /******************************************************************************/
@@ -194,6 +197,35 @@ extents_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
         return edata;
 }
 
+edata_t *
+extents_alloc_grow(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+    ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
+    bool slab, szind_t szind, bool *zero, bool *commit) {
+        assert(size + pad != 0);
+        assert(alignment != 0);
+        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+            WITNESS_RANK_CORE, 0);
+
+        edata_t *edata = extent_alloc_retained(tsdn, arena, ehooks, new_addr,
+            size, pad, alignment, slab, szind, zero, commit);
+        if (edata == NULL) {
+                if (opt_retain && new_addr != NULL) {
+                        /*
+                         * When retain is enabled and new_addr is set, we do not
+                         * attempt extent_alloc_wrapper which does mmap that is
+                         * very unlikely to succeed (unless it happens to be at
+                         * the end).
+                         */
+                        return NULL;
+                }
+                edata = extent_alloc_wrapper(tsdn, arena, ehooks,
+                    new_addr, size, pad, alignment, slab, szind, zero, commit);
+        }
+
+        assert(edata == NULL || edata_dumpable_get(edata));
+        return edata;
+}
+
 void
 extents_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     edata_t *edata) {
@@ -996,10 +1028,13 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         return edata;
 }
 
-static edata_t *
-extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
+edata_t *
+extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
     szind_t szind, bool *zero, bool *commit) {
+        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+            WITNESS_RANK_CORE, 0);
+
         size_t esize = size + pad;
         edata_t *edata = edata_cache_get(tsdn, &arena->edata_cache,
             arena->base);
@@ -1027,33 +1062,6 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
         return edata;
 }
 
-edata_t *
-extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
-    szind_t szind, bool *zero, bool *commit) {
-        witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
-            WITNESS_RANK_CORE, 0);
-
-        edata_t *edata = extent_alloc_retained(tsdn, arena, ehooks, new_addr,
-            size, pad, alignment, slab, szind, zero, commit);
-        if (edata == NULL) {
-                if (opt_retain && new_addr != NULL) {
-                        /*
-                         * When retain is enabled and new_addr is set, we do not
-                         * attempt extent_alloc_wrapper_hard which does mmap
-                         * that is very unlikely to succeed (unless it happens
-                         * to be at the end).
-                         */
-                        return NULL;
-                }
-                edata = extent_alloc_wrapper_hard(tsdn, arena, ehooks,
-                    new_addr, size, pad, alignment, slab, szind, zero, commit);
-        }
-
-        assert(edata == NULL || edata_dumpable_get(edata));
-        return edata;
-}
-
 static bool
 extent_can_coalesce(arena_t *arena, ecache_t *ecache, const edata_t *inner,
     const edata_t *outer) {

File 4 of 4

@@ -159,9 +159,10 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
                         new_mapping = false;
                 }
         } else {
-                if ((trail = extent_alloc_wrapper(tsdn, arena, ehooks,
-                    edata_past_get(edata), trailsize, 0, CACHELINE, false,
-                    SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
+                if ((trail = extents_alloc_grow(tsdn, arena, ehooks,
+                    &arena->ecache_retained, edata_past_get(edata), trailsize,
+                    0, CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit))
+                    == NULL) {
                         return true;
                 }
                 if (config_stats) {