PA: Move in remaining page allocation functions.

This commit is contained in:
David Goldblatt 2020-03-11 11:36:38 -07:00 committed by David Goldblatt
parent 74958567a4
commit 71fc0dc968
5 changed files with 75 additions and 30 deletions

View File

@ -27,8 +27,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
bin_stats_data_t *bstats, arena_stats_large_t *lstats, bin_stats_data_t *bstats, arena_stats_large_t *lstats,
arena_stats_extents_t *estats); arena_stats_extents_t *estats);
void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
ehooks_t *ehooks, edata_t *edata);
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET
size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr); size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
#endif #endif

View File

@ -119,10 +119,26 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx); pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
size_t pa_shard_extent_sn_next(pa_shard_t *shard); size_t pa_shard_extent_sn_next(pa_shard_t *shard);
/* Gets an edata for the given allocation. */
edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add); size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add);
/* Returns true on error, in which case nothing changed. */ /* Returns true on error, in which case nothing changed. */
bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add); size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add);
/*
* Shrink the given edata; same error contract as pa_expand (returns true on
 * error, in which case nothing changed). Sets *generated_dirty to true if we
 * produced new dirty pages, and
* false otherwise.
*/
bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
size_t new_size, szind_t szind, bool slab, bool *generated_dirty);
/*
* Frees the given edata back to the pa. Sets *generated_dirty if we produced
* new dirty pages (well, we always set it for now; but this need not be the
* case).
* (We could make generated_dirty the return value of course, but this is more
* consistent with the shrink pathway and our error codes here).
*/
void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
bool *generated_dirty);
#endif /* JEMALLOC_INTERNAL_PA_H */ #endif /* JEMALLOC_INTERNAL_PA_H */

View File

@ -276,14 +276,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
} }
} }
void void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena) {
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0); WITNESS_RANK_CORE, 0);
ecache_dalloc(tsdn, &arena->pa_shard, ehooks,
&arena->pa_shard.ecache_dirty, edata);
if (arena_dirty_decay_ms_get(arena) == 0) { if (arena_dirty_decay_ms_get(arena) == 0) {
arena_decay_dirty(tsdn, arena, false, true); arena_decay_dirty(tsdn, arena, false, true);
} else { } else {
@ -636,7 +632,7 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
static size_t static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache, decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
bool all, edata_list_t *decay_extents, bool is_background_thread) { bool all, edata_list_t *decay_extents) {
size_t nmadvise, nunmapped; size_t nmadvise, nunmapped;
size_t npurged; size_t npurged;
@ -728,8 +724,7 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
npages_limit, npages_decay_max, &decay_extents); npages_limit, npages_decay_max, &decay_extents);
if (npurge != 0) { if (npurge != 0) {
size_t npurged = arena_decay_stashed(tsdn, arena, ehooks, decay, size_t npurged = arena_decay_stashed(tsdn, arena, ehooks, decay,
decay_stats, ecache, all, &decay_extents, decay_stats, ecache, all, &decay_extents);
is_background_thread);
assert(npurged == npurge); assert(npurged == npurge);
} }
@ -805,8 +800,11 @@ void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) { arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE); arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);
ehooks_t *ehooks = arena_get_ehooks(arena); bool generated_dirty;
arena_extents_dirty_dalloc(tsdn, arena, ehooks, slab); pa_dalloc(tsdn, &arena->pa_shard, slab, &generated_dirty);
if (generated_dirty) {
arena_handle_new_dirty_pages(tsdn, arena);
}
} }
static void static void

View File

@ -69,30 +69,27 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
static bool static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) { large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
arena_t *arena = arena_get_from_edata(edata); arena_t *arena = arena_get_from_edata(edata);
size_t oldusize = edata_usize_get(edata);
ehooks_t *ehooks = arena_get_ehooks(arena); ehooks_t *ehooks = arena_get_ehooks(arena);
size_t diff = edata_size_get(edata) - (usize + sz_large_pad); size_t old_size = edata_size_get(edata);
size_t old_usize = edata_usize_get(edata);
assert(oldusize > usize); assert(old_usize > usize);
if (ehooks_split_will_fail(ehooks)) { if (ehooks_split_will_fail(ehooks)) {
return true; return true;
} }
/* Split excess pages. */ bool generated_dirty;
if (diff != 0) { bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
edata_t *trail = extent_split_wrapper(tsdn, usize + sz_large_pad, sz_size2index(usize), false,
&arena->pa_shard.edata_cache, ehooks, edata, &generated_dirty);
usize + sz_large_pad, sz_size2index(usize), false, diff, if (err) {
SC_NSIZES, false);
if (trail == NULL) {
return true; return true;
} }
if (generated_dirty) {
arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail); arena_handle_new_dirty_pages(tsdn, arena);
} }
arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize);
arena_extent_ralloc_large_shrink(tsdn, arena, edata, oldusize);
return false; return false;
} }
@ -275,8 +272,11 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
static void static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) { large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
ehooks_t *ehooks = arena_get_ehooks(arena); bool generated_dirty;
arena_extents_dirty_dalloc(tsdn, arena, ehooks, edata); pa_dalloc(tsdn, &arena->pa_shard, edata, &generated_dirty);
if (generated_dirty) {
arena_handle_new_dirty_pages(tsdn, arena);
}
} }
void void

View File

@ -99,6 +99,7 @@ bool
pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size, pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) { size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
assert(new_size > old_size); assert(new_size > old_size);
assert(edata_size_get(edata) == old_size);
ehooks_t *ehooks = pa_shard_ehooks_get(shard); ehooks_t *ehooks = pa_shard_ehooks_get(shard);
void *trail_begin = edata_past_get(edata); void *trail_begin = edata_past_get(edata);
@ -135,3 +136,34 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
emap_remap(tsdn, &emap_global, edata, szind, slab); emap_remap(tsdn, &emap_global, edata, szind, slab);
return false; return false;
} }
/*
 * Shrinks edata in place from old_size to new_size by splitting off a trailing
 * extent and returning it to the dirty ecache.
 *
 * Returns true on error, in which case nothing changed.  Sets *generated_dirty
 * to true iff the split succeeded and the trail was deposited as dirty pages.
 */
bool
pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
    size_t new_size, szind_t szind, bool slab, bool *generated_dirty) {
	assert(new_size < old_size);
	/* Consistency with pa_expand: the caller's old_size must match. */
	assert(edata_size_get(edata) == old_size);

	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
	*generated_dirty = false;

	/* Bail early if the extent hooks can't split at all. */
	if (ehooks_split_will_fail(ehooks)) {
		return true;
	}

	/* Split off the trailing (old_size - new_size) bytes. */
	edata_t *trail = extent_split_wrapper(tsdn, &shard->edata_cache, ehooks,
	    edata, new_size, szind, slab, old_size - new_size, SC_NSIZES,
	    false);
	if (trail == NULL) {
		return true;
	}

	/* The trail becomes dirty pages; the caller may need to purge. */
	ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, trail);
	*generated_dirty = true;
	return false;
}
/*
 * Returns edata to the shard's dirty ecache.  *generated_dirty is
 * unconditionally set to true (every dalloc currently produces dirty pages);
 * callers use it to decide whether to run decay logic.
 */
void
pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
    bool *generated_dirty) {
	/* For now, a dalloc always creates dirty pages. */
	*generated_dirty = true;
	ecache_dalloc(tsdn, shard, pa_shard_ehooks_get(shard),
	    &shard->ecache_dirty, edata);
}