From 71fc0dc968189e72a4437fb38759ef380a02a7ab Mon Sep 17 00:00:00 2001
From: David Goldblatt
Date: Wed, 11 Mar 2020 11:36:38 -0700
Subject: [PATCH] PA: Move in remaining page allocation functions.

---
 include/jemalloc/internal/arena_externs.h |  3 +-
 include/jemalloc/internal/pa.h            | 16 ++++++++++
 src/arena.c                               | 18 +++++-------
 src/large.c                               | 36 +++++++++++------------
 src/pa.c                                  | 32 ++++++++++++++++++++
 5 files changed, 75 insertions(+), 30 deletions(-)

diff --git a/include/jemalloc/internal/arena_externs.h b/include/jemalloc/internal/arena_externs.h
index 8548b1f0..cdbfa4b4 100644
--- a/include/jemalloc/internal/arena_externs.h
+++ b/include/jemalloc/internal/arena_externs.h
@@ -27,8 +27,7 @@ void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
     bin_stats_data_t *bstats, arena_stats_large_t *lstats,
     arena_stats_extents_t *estats);
-void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
-    ehooks_t *ehooks, edata_t *edata);
+void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
 #ifdef JEMALLOC_JET
 size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
 #endif
diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h
index a4f80818..df2e88f9 100644
--- a/include/jemalloc/internal/pa.h
+++ b/include/jemalloc/internal/pa.h
@@ -119,10 +119,26 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, base_t *base, unsigned ind,
     pa_shard_stats_t *stats, malloc_mutex_t *stats_mtx);
 size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
+/* Gets an edata for the given allocation. */
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
     size_t alignment, bool slab, szind_t szind, bool *zero,
     size_t *mapped_add);
 /* Returns true on error, in which case nothing changed. */
 bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add);
+/*
+ * Shrinks the given allocation.  Returns true on error (in which case nothing
+ * changed).  Sets *generated_dirty to true iff we produced new dirty pages.
+ */
+bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+    size_t new_size, szind_t szind, bool slab, bool *generated_dirty);
+/*
+ * Frees the given edata back to the pa.  Sets *generated_dirty if we produced
+ * new dirty pages (we always do for now, but this need not remain the
+ * case).
+ * (We could make generated_dirty the return value, of course, but this is
+ * more consistent with the shrink pathway and our error codes here.)
+ */
+void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+    bool *generated_dirty);
 #endif /* JEMALLOC_INTERNAL_PA_H */
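
For context, the caller-side pattern for the new *generated_dirty out-parameter looks like the following. This is an illustrative sketch, not part of the patch: it assumes it is compiled inside the jemalloc tree where these internal declarations are visible, and the helper name example_pa_dalloc is hypothetical. It simply mirrors the call sites this patch adds in src/arena.c and src/large.c below.

/*
 * Illustrative sketch only: the caller-side pattern for pa_dalloc's
 * *generated_dirty out-parameter, mirroring the call sites added below.
 */
static void
example_pa_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
	bool generated_dirty;
	pa_dalloc(tsdn, &arena->pa_shard, edata, &generated_dirty);
	if (generated_dirty) {
		/* Decay policy still lives in the arena, not in the PA. */
		arena_handle_new_dirty_pages(tsdn, arena);
	}
}
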
diff --git a/src/arena.c b/src/arena.c
index c3365a1a..35fefeb1 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -276,14 +276,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
 	}
 }
 
-void
-arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
-    edata_t *edata) {
+void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	ecache_dalloc(tsdn, &arena->pa_shard, ehooks,
-	    &arena->pa_shard.ecache_dirty, edata);
 	if (arena_dirty_decay_ms_get(arena) == 0) {
 		arena_decay_dirty(tsdn, arena, false, true);
 	} else {
@@ -636,7 +632,7 @@ arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
 static size_t
 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     decay_t *decay, pa_shard_decay_stats_t *decay_stats, ecache_t *ecache,
-    bool all, edata_list_t *decay_extents, bool is_background_thread) {
+    bool all, edata_list_t *decay_extents) {
 	size_t nmadvise, nunmapped;
 	size_t npurged;
 
@@ -728,8 +724,7 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
 	    npages_limit, npages_decay_max, &decay_extents);
 	if (npurge != 0) {
 		size_t npurged = arena_decay_stashed(tsdn, arena, ehooks, decay,
-		    decay_stats, ecache, all, &decay_extents,
-		    is_background_thread);
+		    decay_stats, ecache, all, &decay_extents);
 		assert(npurged == npurge);
 	}
 
@@ -805,8 +800,11 @@ void
 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
 	arena_nactive_sub(arena, edata_size_get(slab) >> LG_PAGE);
 
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-	arena_extents_dirty_dalloc(tsdn, arena, ehooks, slab);
+	bool generated_dirty;
+	pa_dalloc(tsdn, &arena->pa_shard, slab, &generated_dirty);
+	if (generated_dirty) {
+		arena_handle_new_dirty_pages(tsdn, arena);
+	}
 }
 
 static void
diff --git a/src/large.c b/src/large.c
index c01b0577..2b913d65 100644
--- a/src/large.c
+++ b/src/large.c
@@ -69,30 +69,27 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 static bool
 large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
 	arena_t *arena = arena_get_from_edata(edata);
-	size_t oldusize = edata_usize_get(edata);
 	ehooks_t *ehooks = arena_get_ehooks(arena);
-	size_t diff = edata_size_get(edata) - (usize + sz_large_pad);
+	size_t old_size = edata_size_get(edata);
+	size_t old_usize = edata_usize_get(edata);
 
-	assert(oldusize > usize);
+	assert(old_usize > usize);
 
 	if (ehooks_split_will_fail(ehooks)) {
 		return true;
 	}
 
-	/* Split excess pages. */
-	if (diff != 0) {
-		edata_t *trail = extent_split_wrapper(tsdn,
-		    &arena->pa_shard.edata_cache, ehooks, edata,
-		    usize + sz_large_pad, sz_size2index(usize), false, diff,
-		    SC_NSIZES, false);
-		if (trail == NULL) {
-			return true;
-		}
-
-		arena_extents_dirty_dalloc(tsdn, arena, ehooks, trail);
+	bool generated_dirty;
+	bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
+	    usize + sz_large_pad, sz_size2index(usize), false,
+	    &generated_dirty);
+	if (err) {
+		return true;
 	}
-
-	arena_extent_ralloc_large_shrink(tsdn, arena, edata, oldusize);
+	if (generated_dirty) {
+		arena_handle_new_dirty_pages(tsdn, arena);
+	}
+	arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize);
 	return false;
 }
 
@@ -275,8 +272,11 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
 
 static void
 large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
-	ehooks_t *ehooks = arena_get_ehooks(arena);
-	arena_extents_dirty_dalloc(tsdn, arena, ehooks, edata);
+	bool generated_dirty;
+	pa_dalloc(tsdn, &arena->pa_shard, edata, &generated_dirty);
+	if (generated_dirty) {
+		arena_handle_new_dirty_pages(tsdn, arena);
+	}
 }
 
 void
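
The shrink pathway gives callers two distinct signals: a true return means an error occurred and nothing changed, while *generated_dirty reports whether trailing pages were handed back as dirty. The sketch below illustrates that pattern; it is modeled on large_ralloc_no_move_shrink above, and the helper name example_pa_shrink is hypothetical.

/*
 * Illustrative sketch only: error handling on the shrink pathway, modeled
 * on large_ralloc_no_move_shrink above.  Caller guarantees
 * new_size < edata_size_get(edata).
 */
static bool
example_pa_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
    size_t new_size, szind_t szind) {
	bool generated_dirty;
	bool err = pa_shrink(tsdn, &arena->pa_shard, edata,
	    edata_size_get(edata), new_size, szind, /* slab */ false,
	    &generated_dirty);
	if (err) {
		/* True means nothing changed; the caller can fall back. */
		return true;
	}
	if (generated_dirty) {
		arena_handle_new_dirty_pages(tsdn, arena);
	}
	return false;
}
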
diff --git a/src/pa.c b/src/pa.c
index 8f33d9a4..dfbff226 100644
--- a/src/pa.c
+++ b/src/pa.c
@@ -99,6 +99,7 @@ bool
 pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
     size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
 	assert(new_size > old_size);
+	assert(edata_size_get(edata) == old_size);
 
 	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
 	void *trail_begin = edata_past_get(edata);
@@ -135,3 +136,34 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	emap_remap(tsdn, &emap_global, edata, szind, slab);
 	return false;
 }
+
+bool
+pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+    size_t new_size, szind_t szind, bool slab, bool *generated_dirty) {
+	assert(new_size < old_size);
+
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+	*generated_dirty = false;
+
+	if (ehooks_split_will_fail(ehooks)) {
+		return true;
+	}
+
+	edata_t *trail = extent_split_wrapper(tsdn, &shard->edata_cache, ehooks,
+	    edata, new_size, szind, slab, old_size - new_size, SC_NSIZES,
+	    false);
+	if (trail == NULL) {
+		return true;
+	}
+	ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, trail);
+	*generated_dirty = true;
+	return false;
+}
+
+void
+pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+    bool *generated_dirty) {
+	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
+	ecache_dalloc(tsdn, shard, ehooks, &shard->ecache_dirty, edata);
+	*generated_dirty = true;
+}
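
Putting the pieces together, the lifecycle of an edata through the PA interface after this patch looks roughly as follows. This is an illustrative sketch only: the sizes are arbitrary, sz_large_pad is ignored for simplicity, the helper name is hypothetical, and it assumes jemalloc's internal PAGE and sz_size2index definitions are in scope.

/*
 * Illustrative sketch only: a full edata lifecycle through pa_alloc,
 * pa_shrink, and pa_dalloc as they stand after this change.
 */
static void
example_pa_lifecycle(tsdn_t *tsdn, pa_shard_t *shard) {
	size_t mapped_add = 0;
	bool zero = false;
	edata_t *edata = pa_alloc(tsdn, shard, 4 * PAGE, PAGE,
	    /* slab */ false, sz_size2index(4 * PAGE), &zero, &mapped_add);
	if (edata == NULL) {
		return;
	}
	bool generated_dirty;
	/* Shrink 4 pages to 2; the 2-page trail goes to the dirty ecache. */
	if (!pa_shrink(tsdn, shard, edata, 4 * PAGE, 2 * PAGE,
	    sz_size2index(2 * PAGE), /* slab */ false, &generated_dirty)
	    && generated_dirty) {
		/* A real caller would notify the arena here. */
	}
	/* pa_dalloc always sets *generated_dirty for now (see pa.h). */
	pa_dalloc(tsdn, shard, edata, &generated_dirty);
}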