PA: Take zero as a bool rather than as a bool *.
Now that we've moved junking to a higher level of the allocation stack, we don't care about this performance optimization (which only occurred in debug modes).
parent 294b276fc7
commit 1a1124462e
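Background for the change below: the old bool * form was an in/out parameter. On entry it carried the zeroing request, and on return it reported whether the returned memory happened to be zeroed already, which debug-mode callers (opt_zero/opt_junk paths) used to skip a redundant fill. With junking handled higher in the allocation stack nothing reads the reported value any more, so the parameter collapses to a plain bool. The snippet below is a minimal generic sketch of the two styles, not jemalloc code; alloc_old and alloc_new are made-up names for illustration.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Old style: *zero is a request on entry and a report on return, so a
 * debug-mode caller can see that the memory is already zeroed and skip
 * its own fill. */
static void *alloc_old(size_t size, bool *zero) {
	void *p = calloc(1, size);	/* this path always returns zeroed memory */
	if (p != NULL) {
		*zero = true;		/* report the actual state back */
	}
	return p;
}

/* New style: zero is only a request; the caller learns nothing about the
 * state of the returned memory. */
static void *alloc_new(size_t size, bool zero) {
	void *p = malloc(size);
	if (p != NULL && zero) {
		memset(p, 0, size);
	}
	return p;
}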
include/jemalloc/internal/arena_externs.h
@@ -33,7 +33,7 @@ void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
 size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
 #endif
 edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
-    size_t usize, size_t alignment, bool *zero);
+    size_t usize, size_t alignment, bool zero);
 void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
     edata_t *edata);
 void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
include/jemalloc/internal/pa.h
@@ -195,10 +195,10 @@ size_t pa_shard_extent_sn_next(pa_shard_t *shard);
 
 /* Gets an edata for the given allocation. */
 edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
-    size_t alignment, bool slab, szind_t szind, bool *zero);
+    size_t alignment, bool slab, szind_t szind, bool zero);
 /* Returns true on error, in which case nothing changed. */
 bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero);
+    size_t new_size, szind_t szind, bool slab, bool zero);
 /*
  * The same. Sets *generated_dirty to true if we produced new dirty pages, and
  * false otherwise.
src/arena.c
@@ -347,7 +347,7 @@ arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
 
 edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero) {
+    size_t alignment, bool zero) {
 	szind_t szind = sz_size2index(usize);
 	size_t esize = usize + sz_large_pad;
 
@@ -736,10 +736,8 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	bool zero = false;
-
 	edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
-	    PAGE, /* slab */ true, /* szind */ binind, &zero);
+	    PAGE, /* slab */ true, /* szind */ binind, /* zero */ false);
 
 	if (slab == NULL) {
 		return NULL;
src/large.c (26 changed lines)
@@ -23,7 +23,6 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero) {
 	size_t ausize;
 	edata_t *edata;
-	bool is_zeroed;
 	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
 
 	assert(!tsdn_null(tsdn) || arena != NULL);
@@ -36,17 +35,11 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	if (config_fill && unlikely(opt_zero)) {
 		zero = true;
 	}
-	/*
-	 * Copy zero into is_zeroed and pass the copy when allocating the
-	 * extent, so that it is possible to make correct zero fill decisions
-	 * below, even if is_zeroed ends up true when zero is false.
-	 */
-	is_zeroed = zero;
 	if (likely(!tsdn_null(tsdn))) {
 		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
 	}
 	if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
-	    arena, usize, alignment, &is_zeroed)) == NULL) {
+	    arena, usize, alignment, zero)) == NULL) {
 		return NULL;
 	}
 
@@ -58,10 +51,6 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 		malloc_mutex_unlock(tsdn, &arena->large_mtx);
 	}
 
-	if (zero) {
-		assert(is_zeroed);
-	}
-
 	arena_decay_tick(tsdn, arena);
 	return edata_addr_get(edata);
 }
@@ -99,23 +88,13 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
     bool zero) {
 	arena_t *arena = arena_get_from_edata(edata);
 
-	if (config_fill && unlikely(opt_zero)) {
-		zero = true;
-	}
-
 	size_t old_size = edata_size_get(edata);
 	size_t old_usize = edata_usize_get(edata);
 	size_t new_size = usize + sz_large_pad;
 
-	/*
-	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
-	 * extent, so that it is possible to make correct zero fill decisions
-	 * below, even if is_zeroed_trail ends up true when zero is false.
-	 */
-	bool is_zeroed_trail = zero;
 	szind_t szind = sz_size2index(usize);
 	bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
-	    szind, /* slab */ false, &is_zeroed_trail);
+	    szind, /* slab */ false, zero);
 	if (err) {
 		return true;
 	}
@@ -137,7 +116,6 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 			assert(nzero > 0);
 			memset(zbase, 0, nzero);
 		}
-		assert(is_zeroed_trail);
 	}
 	arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
 
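For context on the memset that survives in the hunk above: when a zeroed expansion is requested and large allocations are cache-obliviously offset within their first page, the bytes of the original allocation's last page that lie past old_usize are in an indeterminate state and are zeroed explicitly by the caller; only the assert on the now-removed is_zeroed_trail goes away. Below is a condensed, generic sketch of that tail computation under an assumed 4 KiB page; zero_trailing_page_bytes and PAGE_ are illustrative names, not the jemalloc definitions.

#include <stdint.h>
#include <string.h>

#define PAGE_ ((uintptr_t)4096)	/* illustrative page size */

/* Zero from the first byte past the old usable size up to the end of the
 * page that byte falls in. */
static void zero_trailing_page_bytes(void *addr, size_t old_usize) {
	uintptr_t zbase = (uintptr_t)addr + old_usize;
	uintptr_t zpast = (zbase + PAGE_ - 1) & ~(PAGE_ - 1);	/* round up to page boundary */
	memset((void *)zbase, 0, (size_t)(zpast - zbase));	/* no-op if already aligned */
}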
src/pa.c (16 changed lines)
@@ -112,7 +112,7 @@ pa_shard_may_have_muzzy(pa_shard_t *shard) {
 
 edata_t *
 pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
-    bool slab, szind_t szind, bool *zero) {
+    bool slab, szind_t szind, bool zero) {
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
@@ -121,16 +121,16 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	ehooks_t *ehooks = pa_shard_ehooks_get(shard);
 	edata_t *edata = ecache_alloc(tsdn, shard, ehooks,
 	    &shard->ecache_dirty, NULL, size, alignment, slab, szind,
-	    zero);
+	    &zero);
 
 	if (edata == NULL && pa_shard_may_have_muzzy(shard)) {
 		edata = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
-		    NULL, size, alignment, slab, szind, zero);
+		    NULL, size, alignment, slab, szind, &zero);
 	}
 	if (edata == NULL) {
 		edata = ecache_alloc_grow(tsdn, shard, ehooks,
 		    &shard->ecache_retained, NULL, size, alignment, slab,
-		    szind, zero);
+		    szind, &zero);
 		mapped_add = size;
 	}
 	if (edata != NULL) {
@@ -145,7 +145,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 
 bool
 pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero) {
+    size_t new_size, szind_t szind, bool slab, bool zero) {
 	assert(new_size > old_size);
 	assert(edata_size_get(edata) == old_size);
 	assert((new_size & PAGE_MASK) == 0);
@@ -161,16 +161,16 @@ pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
 	}
 	edata_t *trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_dirty,
 	    trail_begin, expand_amount, PAGE, /* slab */ false, SC_NSIZES,
-	    zero);
+	    &zero);
 	if (trail == NULL) {
 		trail = ecache_alloc(tsdn, shard, ehooks, &shard->ecache_muzzy,
 		    trail_begin, expand_amount, PAGE, /* slab */ false,
-		    SC_NSIZES, zero);
+		    SC_NSIZES, &zero);
 	}
 	if (trail == NULL) {
 		trail = ecache_alloc_grow(tsdn, shard, ehooks,
 		    &shard->ecache_retained, trail_begin, expand_amount, PAGE,
-		    /* slab */ false, SC_NSIZES, zero);
+		    /* slab */ false, SC_NSIZES, &zero);
 		mapped_add = expand_amount;
 	}
 	if (trail == NULL) {
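Inside pa.c the lower-level ecache_alloc / ecache_alloc_grow calls keep the bool * convention, so pa_alloc and pa_expand now adapt by passing the address of their by-value parameter and ignoring whatever gets written back. Below is a generic sketch of that adapter pattern; upper_alloc and lower_alloc are made-up names for illustration, not jemalloc functions.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Lower layer keeps the in/out pointer: *zero is the request on entry and
 * the actual state of the returned memory on return. */
static void *lower_alloc(size_t size, bool *zero) {
	void *p = malloc(size);
	if (p != NULL && *zero) {
		memset(p, 0, size);	/* honor the request; *zero stays true */
	}
	return p;
}

/* Upper layer exposes a plain bool and adapts by taking the address of its
 * own parameter; the value written back by the lower layer is discarded. */
static void *upper_alloc(size_t size, bool zero) {
	return lower_alloc(size, &zero);
}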