diff --git a/include/jemalloc/internal/pa.h b/include/jemalloc/internal/pa.h
index ef140b3e..a4f80818 100644
--- a/include/jemalloc/internal/pa.h
+++ b/include/jemalloc/internal/pa.h
@@ -122,8 +122,7 @@ size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
     size_t alignment, bool slab, szind_t szind, bool *zero, size_t *mapped_add);
 /* Returns true on error, in which case nothing changed. */
-bool
-pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t new_usize,
-    szind_t szind, bool slab, bool *zero, size_t *mapped_add);
+bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+    size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add);
 
 #endif /* JEMALLOC_INTERNAL_PA_H */
diff --git a/src/large.c b/src/large.c
index 60b51d8c..c01b0577 100644
--- a/src/large.c
+++ b/src/large.c
@@ -106,7 +106,9 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 		zero = true;
 	}
 
+	size_t old_size = edata_size_get(edata);
 	size_t old_usize = edata_usize_get(edata);
+	size_t new_size = usize + sz_large_pad;
 
 	/*
 	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
@@ -116,7 +118,7 @@
 	bool is_zeroed_trail = zero;
 	size_t mapped_add;
 	szind_t szind = sz_size2index(usize);
-	bool err = pa_expand(tsdn, &arena->pa_shard, edata, usize,
+	bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
 	    szind, /* slab */ false, &is_zeroed_trail, &mapped_add);
 	if (err) {
 		return true;
diff --git a/src/pa.c b/src/pa.c
index 7fafa7e3..8f33d9a4 100644
--- a/src/pa.c
+++ b/src/pa.c
@@ -96,29 +96,31 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 }
 
 bool
-pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t new_usize,
-    szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
+pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+    size_t new_size, szind_t szind, bool slab, bool *zero, size_t *mapped_add) {
+	assert(new_size > old_size);
+
 	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
-	size_t old_usize = edata_usize_get(edata);
-	size_t trail_size = new_usize - old_usize;
 	void *trail_begin = edata_past_get(edata);
+	size_t expand_amount = new_size - old_size;
 
 	*mapped_add = 0;
 	if (ehooks_merge_will_fail(ehooks)) {
 		return true;
 	}
 	edata_t *trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_dirty,
-	    trail_begin, trail_size, PAGE, /* slab */ false, SC_NSIZES, zero);
+	    trail_begin, expand_amount, PAGE, /* slab */ false, SC_NSIZES,
+	    zero);
 	if (trail == NULL) {
 		trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
-		    trail_begin, trail_size, PAGE, /* slab */ false, SC_NSIZES,
-		    zero);
+		    trail_begin, expand_amount, PAGE, /* slab */ false,
+		    SC_NSIZES, zero);
	}
 	if (trail == NULL) {
 		trail = ecache_alloc_grow(tsdn, shard, ehooks,
-		    &shard->ecache_retained, trail_begin, trail_size, PAGE,
+		    &shard->ecache_retained, trail_begin, expand_amount, PAGE,
 		    /* slab */ false, SC_NSIZES, zero);
-		*mapped_add = trail_size;
+		*mapped_add = expand_amount;
 	}
 	if (trail == NULL) {
 		*mapped_add = 0;