Initialize deferred_work_generated
As the code evolves, some code paths that have previously assigned deferred_work_generated may cease being reached. This would leave the value uninitialized. This change initializes the value for safety.
Committed by: Alexander Lapenkov
Parent: 912324a1ac
Commit: c9ebff0fd6
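The failure mode the commit message describes is the classic uninitialized out-parameter hazard: a callee that is expected to set a flag stops doing so on some path, and the caller then branches on an indeterminate value. Below is a minimal, self-contained sketch of that hazard and of the fix this commit applies (initialize at the declaration). It is illustrative only; the names do_dalloc and handle_deferred_work are hypothetical, not jemalloc APIs.

/*
 * Illustrative sketch only (not jemalloc code). do_dalloc() stands in for a
 * callee such as pa_dalloc() that is supposed to report, via an
 * out-parameter, whether deferred work was generated.
 */
#include <stdbool.h>
#include <stdio.h>

/* After a refactor, the fast path no longer writes the flag. */
static void
do_dalloc(int kind, bool *deferred_work_generated) {
        if (kind == 0) {
                return; /* forgets to assign *deferred_work_generated */
        }
        *deferred_work_generated = true;
}

static void
handle_deferred_work(void) {
        puts("handling deferred work");
}

int
main(void) {
        /*
         * Initializing at the declaration, as this commit does, keeps the
         * branch below well-defined even when do_dalloc() takes the path
         * that skips the assignment. With a bare declaration, the caller
         * would read an indeterminate value.
         */
        bool deferred_work_generated = false;
        do_dalloc(0, &deferred_work_generated);
        if (deferred_work_generated) {
                handle_deferred_work();
        }
        return 0;
}

A side effect of moving the initialization to the callers is visible in the diff below: pai implementations such as hpa_expand, pac_alloc_impl, and sec_alloc no longer need their own *deferred_work_generated = false; stores, so those redundant assignments are removed.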
@@ -324,7 +324,7 @@ arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
 edata_t *
 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         szind_t szind = sz_size2index(usize);
         size_t esize = usize + sz_large_pad;
 
@@ -561,7 +561,7 @@ arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
 
 void
 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
         if (deferred_work_generated) {
                 arena_handle_deferred_work(tsdn, arena);
@@ -825,7 +825,7 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
 static edata_t *
 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
     const bin_info_t *bin_info) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
             WITNESS_RANK_CORE, 0);
 
@@ -785,7 +785,6 @@ static bool
 hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
     size_t new_size, bool zero, bool *deferred_work_generated) {
         /* Expand not yet supported. */
-        *deferred_work_generated = false;
         return true;
 }
 
@@ -793,7 +792,6 @@ static bool
 hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool *deferred_work_generated) {
         /* Shrink not yet supported. */
-        *deferred_work_generated = false;
         return true;
 }
 
@@ -64,7 +64,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
                 return true;
         }
 
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
             usize + sz_large_pad, sz_size2index(usize),
             &deferred_work_generated);
@@ -90,7 +90,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
 
         szind_t szind = sz_size2index(usize);
 
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
             szind, zero, &deferred_work_generated);
 
@@ -249,7 +249,7 @@ large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
 
 static void
 large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
         if (deferred_work_generated) {
                 arena_handle_deferred_work(tsdn, arena);
src/pa.c
@@ -126,7 +126,6 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
         assert(!guarded || alignment <= PAGE);
 
         edata_t *edata = NULL;
-        *deferred_work_generated = false;
         if (!guarded && pa_shard_uses_hpa(shard)) {
                 edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
                     zero, /* guarded */ false, deferred_work_generated);
@@ -157,8 +157,6 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
 static edata_t *
 pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
     bool zero, bool guarded, bool *deferred_work_generated) {
-        *deferred_work_generated = false;
-
         pac_t *pac = (pac_t *)self;
         ehooks_t *ehooks = pac_ehooks_get(pac);
 
@@ -179,8 +177,6 @@ pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
         pac_t *pac = (pac_t *)self;
         ehooks_t *ehooks = pac_ehooks_get(pac);
 
-        *deferred_work_generated = false;
-
         size_t mapped_add = 0;
         size_t expand_amount = new_size - old_size;
 
@@ -221,8 +217,6 @@ pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
 
         size_t shrink_amount = old_size - new_size;
 
-        *deferred_work_generated = false;
-
         if (ehooks_split_will_fail(ehooks)) {
                 return true;
         }
@@ -148,7 +148,7 @@ sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
         }
 
         malloc_mutex_unlock(tsdn, &shard->mtx);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
             &deferred_work_generated);
 }
@@ -178,7 +178,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
 
         edata_list_active_t result;
         edata_list_active_init(&result);
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
             1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated);
 
@@ -223,7 +223,6 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
         assert(!guarded);
 
         sec_t *sec = (sec_t *)self;
-        *deferred_work_generated = false;
 
         if (zero || alignment > PAGE || sec->opts.nshards == 0
             || size > sec->opts.max_alloc) {
@@ -291,7 +290,7 @@ sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
          * we're disabling the HPA or resetting the arena, both of which are
          * rare pathways.
          */
-        bool deferred_work_generated;
+        bool deferred_work_generated = false;
         pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
             &deferred_work_generated);
 }
@@ -341,7 +340,6 @@ sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
         sec_shard_t *shard = sec_shard_pick(tsdn, sec);
         malloc_mutex_lock(tsdn, &shard->mtx);
         if (shard->enabled) {
-                *deferred_work_generated = false;
                 sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
         } else {
                 malloc_mutex_unlock(tsdn, &shard->mtx);