Initialize deferred_work_generated

As the code evolves, code paths that previously assigned
deferred_work_generated may cease to be reached, leaving the value
uninitialized. This change initializes the value at its declaration for safety.
Authored by Alex Lapenkou on 2021-10-06 15:22:38 -07:00; committed by Alexander Lapenkov
parent 912324a1ac
commit c9ebff0fd6
9 changed files with 23 additions and 40 deletions
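
The hazard: the pai allocation paths report whether they generated deferred
work through a bool out-parameter, and a callee path that returns early
without assigning it leaves the caller branching on an indeterminate value.
Below is a minimal sketch of the failure mode and the fix; the names
backend_alloc and caller are hypothetical stand-ins, not jemalloc's actual
functions:

#include <stdbool.h>
#include <stddef.h>

/* Callee: one path returns without assigning the out-parameter. */
static void *
backend_alloc(size_t size, bool *deferred_work_generated) {
        if (size == 0) {
                /* Early return: *deferred_work_generated stays unassigned. */
                return NULL;
        }
        *deferred_work_generated = true;
        return NULL; /* real allocation elided for the sketch */
}

static void
caller(void) {
        /*
         * The commit's fix: initialize at the declaration, so the read
         * below is well-defined even when backend_alloc() exits early.
         */
        bool deferred_work_generated = false;
        backend_alloc(0, &deferred_work_generated);
        if (deferred_work_generated) {
                /* ... handle deferred work ... */
        }
}

With callers defaulting the flag to false, the pai implementations
(hpa_expand, pac_alloc_impl, sec_alloc, and the rest) no longer need their
own *deferred_work_generated = false; lines, which is why the diff below
deletes more lines than it adds.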

File: src/arena.c

@@ -324,7 +324,7 @@ arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
 edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         szind_t szind = sz_size2index(usize);
         size_t esize = usize + sz_large_pad;
@@ -561,7 +561,7 @@ arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
 void
 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
         if (deferred_work_generated) {
                 arena_handle_deferred_work(tsdn, arena);
@@ -825,7 +825,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 static edata_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
     const bin_info_t *bin_info) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
             WITNESS_RANK_CORE, 0);

File: src/hpa.c

@@ -785,7 +785,6 @@ static bool
 hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
     size_t new_size, bool zero, bool *deferred_work_generated) {
         /* Expand not yet supported. */
-        *deferred_work_generated = false;
         return true;
 }
@@ -793,7 +792,6 @@ static bool
 hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool *deferred_work_generated) {
         /* Shrink not yet supported. */
-        *deferred_work_generated = false;
         return true;
 }

File: src/large.c

@@ -64,7 +64,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
                 return true;
         }
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
             usize + sz_large_pad, sz_size2index(usize),
             &deferred_work_generated);
@@ -90,7 +90,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
         szind_t szind = sz_size2index(usize);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
             szind, zero, &deferred_work_generated);
@@ -249,7 +249,7 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
 static void
 large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
         if (deferred_work_generated) {
                 arena_handle_deferred_work(tsdn, arena);

File: src/pa.c

@@ -126,7 +126,6 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
         assert(!guarded || alignment <= PAGE);
         edata_t *edata = NULL;
-        *deferred_work_generated = false;
         if (!guarded && pa_shard_uses_hpa(shard)) {
                 edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
                     zero, /* guarded */ false, deferred_work_generated);

File: src/pac.c

@@ -157,8 +157,6 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
 static edata_t *
 pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
     bool zero, bool guarded, bool *deferred_work_generated) {
-        *deferred_work_generated = false;
-
         pac_t *pac = (pac_t *)self;
         ehooks_t *ehooks = pac_ehooks_get(pac);
@@ -179,8 +177,6 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
         pac_t *pac = (pac_t *)self;
         ehooks_t *ehooks = pac_ehooks_get(pac);
-        *deferred_work_generated = false;
-
         size_t mapped_add = 0;
         size_t expand_amount = new_size - old_size;
@@ -221,8 +217,6 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
         size_t shrink_amount = old_size - new_size;
-        *deferred_work_generated = false;
-
         if (ehooks_split_will_fail(ehooks)) {
                 return true;
         }

File: src/sec.c

@@ -148,7 +148,7 @@ sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
         }
         malloc_mutex_unlock(tsdn, &shard->mtx);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
             &deferred_work_generated);
 }
@@ -178,7 +178,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
         edata_list_active_t result;
         edata_list_active_init(&result);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
             1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated);
@@ -223,7 +223,6 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
         assert(!guarded);
         sec_t *sec = (sec_t *)self;
-        *deferred_work_generated = false;
         if (zero || alignment > PAGE || sec->opts.nshards == 0
             || size > sec->opts.max_alloc) {
@@ -291,7 +290,7 @@ sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
          * we're disabling the HPA or resetting the arena, both of which are
          * rare pathways.
          */
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
             &deferred_work_generated);
 }
@@ -341,7 +340,6 @@ sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
         sec_shard_t *shard = sec_shard_pick(tsdn, sec);
         malloc_mutex_lock(tsdn, &shard->mtx);
         if (shard->enabled) {
-                *deferred_work_generated = false;
                 sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
         } else {
                 malloc_mutex_unlock(tsdn, &shard->mtx);

File: test/unit/hpa.c

@@ -79,7 +79,7 @@ TEST_BEGIN(test_alloc_max) {
         edata_t *edata;
         /* Small max */
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         edata = pai_alloc(tsdn, &shard->pai, ALLOC_MAX, PAGE, false, false,
             &deferred_work_generated);
         expect_ptr_not_null(edata, "Allocation of small max failed");
@@ -169,7 +169,7 @@ TEST_BEGIN(test_stress) {
         mem_tree_t tree;
         mem_tree_new(&tree);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         for (size_t i = 0; i < 100 * 1000; i++) {
                 size_t operation = prng_range_zu(&prng_state, 2);
@@ -252,7 +252,7 @@ TEST_BEGIN(test_alloc_dalloc_batch) {
             &test_hpa_shard_opts_default);
         tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         enum {NALLOCS = 8};
@@ -369,7 +369,7 @@ TEST_BEGIN(test_defer_time) {
         hpa_shard_t *shard = create_test_data(&hooks, &opts);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         nstime_init(&defer_curtime, 0);
         tsdn_t *tsdn = tsd_tsdn(tsd_fetch());

File: test/unit/pa.c

@@ -88,7 +88,7 @@ static void *
 do_alloc_free_purge(void *arg) {
         test_data_t *test_data = (test_data_t *)arg;
         for (int i = 0; i < 10 * 1000; i++) {
-                bool deferred_work_generated;
+                bool deferred_work_generated = false;
                 edata_t *edata = pa_alloc(TSDN_NULL, &test_data->shard, PAGE,
                     PAGE, /* slab */ false, /* szind */ 0, /* zero */ false,
                     /* guarded */ false, &deferred_work_generated);

File: test/unit/sec.c

@@ -54,7 +54,6 @@ pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
     bool *deferred_work_generated) {
         assert(!guarded);
         pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
-        *deferred_work_generated = false;
         if (ta->alloc_fail) {
                 return NULL;
         }
@@ -76,7 +75,6 @@ pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t nallocs, edata_list_active_t *results,
     bool *deferred_work_generated) {
         pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
-        *deferred_work_generated = false;
         if (ta->alloc_fail) {
                 return 0;
         }
@@ -100,7 +98,6 @@ pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero,
     bool *deferred_work_generated) {
         pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
-        *deferred_work_generated = false;
         ta->expand_count++;
         return ta->expand_return_value;
 }
@@ -109,7 +106,6 @@ static bool
 pai_test_allocator_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool *deferred_work_generated) {
         pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
-        *deferred_work_generated = false;
         ta->shrink_count++;
         return ta->shrink_return_value;
 }
@@ -118,7 +114,6 @@ static void
 pai_test_allocator_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     bool *deferred_work_generated) {
         pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
-        *deferred_work_generated = false;
         ta->dalloc_count++;
         free(edata);
 }
@@ -127,7 +122,6 @@ static void
 pai_test_allocator_dalloc_batch(tsdn_t *tsdn, pai_t *self,
     edata_list_active_t *list, bool *deferred_work_generated) {
         pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
-        *deferred_work_generated = false;
         edata_t *edata;
         while ((edata = edata_list_active_first(list)) != NULL) {
@@ -179,7 +173,7 @@ TEST_BEGIN(test_reuse) {
         enum { NALLOCS = 11 };
         edata_t *one_page[NALLOCS];
         edata_t *two_page[NALLOCS];
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 2 * PAGE,
             /* max_bytes */ 2 * (NALLOCS * PAGE + NALLOCS * 2 * PAGE));
         for (int i = 0; i < NALLOCS; i++) {
@@ -256,7 +250,7 @@ TEST_BEGIN(test_auto_flush) {
         enum { NALLOCS = 10 };
         edata_t *extra_alloc;
         edata_t *allocs[NALLOCS];
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
             /* max_bytes */ NALLOCS * PAGE);
         for (int i = 0; i < NALLOCS; i++) {
@@ -312,7 +306,7 @@ do_disable_flush_test(bool is_disable) {
         enum { NALLOCS = 11 };
         edata_t *allocs[NALLOCS];
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
             /* max_bytes */ NALLOCS * PAGE);
         for (int i = 0; i < NALLOCS; i++) {
@@ -380,7 +374,7 @@ TEST_BEGIN(test_max_alloc_respected) {
         size_t max_alloc = 2 * PAGE;
         size_t attempted_alloc = 3 * PAGE;
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, max_alloc,
             /* max_bytes */ 1000 * PAGE);
@@ -414,7 +408,7 @@ TEST_BEGIN(test_expand_shrink_delegate) {
         /* See the note above -- we can't use the real tsd. */
         tsdn_t *tsdn = TSDN_NULL;
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ 10 * PAGE,
             /* max_bytes */ 1000 * PAGE);
@@ -458,7 +452,7 @@ TEST_BEGIN(test_nshards_0) {
         opts.nshards = 0;
         sec_init(TSDN_NULL, &sec, base, &ta.pai, &opts);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         edata_t *edata = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,
             /* zero */ false, /* guarded */ false,
             &deferred_work_generated);
@@ -495,7 +489,7 @@ TEST_BEGIN(test_stats_simple) {
                 FLUSH_PAGES = 20,
         };
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
             /* max_bytes */ FLUSH_PAGES * PAGE);
@@ -544,7 +538,7 @@ TEST_BEGIN(test_stats_auto_flush) {
         edata_t *extra_alloc1;
         edata_t *allocs[2 * FLUSH_PAGES];
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         extra_alloc0 = pai_alloc(tsdn, &sec.pai, PAGE, PAGE, /* zero */ false,
             /* guarded */ false, &deferred_work_generated);
@@ -590,7 +584,7 @@ TEST_BEGIN(test_stats_manual_flush) {
         test_sec_init(&sec, &ta.pai, /* nshards */ 1, /* max_alloc */ PAGE,
             /* max_bytes */ FLUSH_PAGES * PAGE);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         edata_t *allocs[FLUSH_PAGES];
         for (size_t i = 0; i < FLUSH_PAGES; i++) {
                 allocs[i] = pai_alloc(tsdn, &sec.pai, PAGE, PAGE,