PA: Take zero as a bool rather than as a bool *.

Now that we've moved junking to a higher level of the allocation stack, we don't
care about this performance optimization (which only occurred in debug modes).
This commit is contained in:
David Goldblatt
2020-03-14 18:10:29 -07:00
committed by David Goldblatt
parent 294b276fc7
commit 1a1124462e
5 changed files with 15 additions and 39 deletions

View File

@@ -33,7 +33,7 @@ void arena_handle_new_dirty_pages(tsdn_t *tsdn, arena_t *arena);
size_t arena_slab_regind(edata_t *slab, szind_t binind, const void *ptr);
#endif
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
-    size_t usize, size_t alignment, bool *zero);
+    size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,

View File

@@ -195,10 +195,10 @@ size_t pa_shard_extent_sn_next(pa_shard_t *shard);
/* Gets an edata for the given allocation. */
edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
-    size_t alignment, bool slab, szind_t szind, bool *zero);
+    size_t alignment, bool slab, szind_t szind, bool zero);
/* Returns true on error, in which case nothing changed. */
bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
-    size_t new_size, szind_t szind, bool slab, bool *zero);
+    size_t new_size, szind_t szind, bool slab, bool zero);
/*
* The same. Sets *generated_dirty to true if we produced new dirty pages, and
* false otherwise.