PA: Have large expands use it.

Author:	David Goldblatt, 2020-03-10 14:38:55 -07:00 (committed by David Goldblatt)
commit	0880c2ab97
parent	7be3dea82c

3 changed files with 57 additions and 43 deletions

include/jemalloc/internal/pa.h

@@ -119,7 +119,10 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
-edata_t *
-pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool *zero, size_t *mapped_add);
+edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
+    size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add);
+
+/* Returns true on error, in which case nothing changed. */
+bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+    size_t new_usize, bool *zero, size_t *mapped_add);
 
 #endif /* JEMALLOC_INTERNAL_PA_H */
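
The comment added above pa_expand() pins down its contract: a true return means failure, and in that case the extent is untouched, so callers can bail out without any cleanup. A minimal caller sketch of that contract (hypothetical code, mirroring the large.c caller below; not part of this commit):

	/* Hypothetical caller; tsdn, shard, edata, and new_usize assumed set up. */
	size_t mapped_add;
	bool is_zeroed = false;
	if (pa_expand(tsdn, shard, edata, new_usize, &is_zeroed, &mapped_add)) {
		/* Failure: the extent is unchanged; fall back (e.g. realloc-and-move). */
		return true;
	}
	/* Success: mapped_add reports any newly mapped bytes, for the stats. */
	if (config_stats && mapped_add > 0) {
		pa_shard_stats_mapped_add(tsdn, shard, mapped_add);
	}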

src/large.c

@@ -101,57 +101,28 @@ static bool
 large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
     bool zero) {
 	arena_t *arena = arena_get_from_edata(edata);
-	size_t oldusize = edata_usize_get(edata);
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-	size_t trailsize = usize - oldusize;
-
-	if (ehooks_merge_will_fail(ehooks)) {
-		return true;
-	}
 
 	if (config_fill && unlikely(opt_zero)) {
 		zero = true;
 	}
+
+	size_t old_usize = edata_usize_get(edata);
+
 	/*
 	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
 	 * extent, so that it is possible to make correct zero fill decisions
 	 * below, even if is_zeroed_trail ends up true when zero is false.
 	 */
 	bool is_zeroed_trail = zero;
-	edata_t *trail;
-	bool new_mapping;
-	if ((trail = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_dirty, edata_past_get(edata), trailsize,
-	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL
-	    || (trail = ecache_alloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_muzzy, edata_past_get(edata), trailsize,
-	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail)) != NULL) {
-		if (config_stats) {
-			new_mapping = false;
-		}
-	} else {
-		if ((trail = ecache_alloc_grow(tsdn, &arena->pa_shard, ehooks,
-		    &arena->pa_shard.ecache_retained, edata_past_get(edata),
-		    trailsize, CACHELINE, false, SC_NSIZES, &is_zeroed_trail))
-		    == NULL) {
-			return true;
-		}
-		if (config_stats) {
-			new_mapping = true;
-		}
-	}
-
-	if (extent_merge_wrapper(tsdn, ehooks, &arena->pa_shard.edata_cache,
-	    edata, trail)) {
-		extent_dalloc_wrapper(tsdn, &arena->pa_shard, ehooks, trail);
+	size_t mapped_add;
+	bool err = pa_expand(tsdn, &arena->pa_shard, edata, usize,
+	    &is_zeroed_trail, &mapped_add);
+	if (err) {
 		return true;
 	}
-
-	szind_t szind = sz_size2index(usize);
-	emap_remap(tsdn, &emap_global, edata, szind, false);
-
-	if (config_stats && new_mapping) {
-		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, trailsize);
+	if (config_stats && mapped_add > 0) {
+		pa_shard_stats_mapped_add(tsdn, &arena->pa_shard, mapped_add);
 	}
 
 	if (zero) {
@@ -164,7 +135,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 			 * of CACHELINE in [0 .. PAGE).
 			 */
 			void *zbase = (void *)
-			    ((uintptr_t)edata_addr_get(edata) + oldusize);
+			    ((uintptr_t)edata_addr_get(edata) + old_usize);
 			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
 			    PAGE));
 			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
@@ -173,7 +144,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 		}
 		assert(is_zeroed_trail);
 	}
-	arena_extent_ralloc_large_expand(tsdn, arena, edata, oldusize);
+	arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
 
 	return false;
 }
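
The zbase/zpast arithmetic above zeroes only from the old end of the allocation to the next page boundary; everything past that boundary belongs to the freshly merged trail, whose zero state is tracked by is_zeroed_trail. A worked example with hypothetical numbers (assuming PAGE = 4096; not from this commit):

	/* Say addr = 0x1000000 and old_usize = 0x5140 (20800, CACHELINE-aligned). */
	void *zbase = (void *)(0x1000000 + 0x5140);                /* 0x1005140 */
	void *zpast = PAGE_ADDR2BASE((void *)(0x1005140 + 4096));  /* 0x1006000 */
	size_t nzero = 0x1006000 - 0x1005140;                      /* 0xec0 = 3776 bytes */
	memset(zbase, 0, nzero);  /* zero the tail of the old final page only */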

src/pa.c

@@ -94,3 +94,43 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	}
 	return edata;
 }
+
+bool
+pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t new_usize,
+    bool *zero, size_t *mapped_add) {
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+	size_t old_usize = edata_usize_get(edata);
+	size_t trail_size = new_usize - old_usize;
+	void *trail_begin = edata_past_get(edata);
+
+	*mapped_add = 0;
+	if (ehooks_merge_will_fail(ehooks)) {
+		return true;
+	}
+	edata_t *trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_dirty,
+	    trail_begin, trail_size, PAGE, /* slab */ false, SC_NSIZES, zero);
+	if (trail == NULL) {
+		trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
+		    trail_begin, trail_size, PAGE, /* slab */ false, SC_NSIZES,
+		    zero);
+	}
+	if (trail == NULL) {
+		trail = ecache_alloc_grow(tsdn, shard, ehooks,
+		    &shard->ecache_retained, trail_begin, trail_size, PAGE,
+		    /* slab */ false, SC_NSIZES, zero);
+		*mapped_add = trail_size;
+	}
+	if (trail == NULL) {
+		*mapped_add = 0;
+		return true;
+	}
+	if (extent_merge_wrapper(tsdn, ehooks, &shard->edata_cache, edata,
+	    trail)) {
+		extent_dalloc_wrapper(tsdn, shard, ehooks, trail);
+		*mapped_add = 0;
+		return true;
+	}
+	szind_t szind = sz_size2index(new_usize);
+	emap_remap(tsdn, &emap_global, edata, szind, /* slab */ false);
+	return false;
+}
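
pa_expand() keeps the old fallback order, with the cost hierarchy made explicit: reuse a dirty extent first, then a muzzy one, and only as a last resort map new memory by growing from the retained cache. *mapped_add is set to trail_size on that last path alone and reset to zero on every failure path, so callers can feed it directly into the mapped statistics. (One behavioral nit: the trail is now requested with PAGE alignment where the inlined code used CACHELINE.) A test-style sketch of the error contract (hypothetical; assumes tsdn, shard, and a live large edata; not part of this commit):

	size_t old_usize = edata_usize_get(edata);
	size_t mapped_add;
	bool zeroed = false;
	bool err = pa_expand(tsdn, shard, edata, old_usize + PAGE, &zeroed,
	    &mapped_add);
	if (err) {
		/* "Nothing changed" on error: size and mapping accounting intact. */
		assert(edata_usize_get(edata) == old_usize);
		assert(mapped_add == 0);
	}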