Fix remaining static analysis warnings

Fix or suppress the remaining warnings generated by static analysis.
This is a necessary step before we can incorporate static analysis into
CI. Where possible, I've preferred to modify the code itself instead of
just disabling the warning with a magic comment, so that if we adopt
different static analysis tools in the future, we will already be
covered against similar warnings they might raise.
Kevin Svetlitski 2023-05-12 13:17:52 -07:00 committed by Qi Wang
parent 210f0d0b2b
commit bb0333e745
12 changed files with 56 additions and 16 deletions
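
As a sketch of the trade-off the commit message describes (illustrative
code, not part of the commit): a magic comment silences exactly one
tool, whereas restating the invariant in code informs any analyzer. The
function names here are invented; clang-analyzer-core.NullDereference
is a real clang-tidy check name.

#include <assert.h>

/* Tool-specific: only clang-tidy understands NOLINT comments. */
int
deref_suppressed(int *p) {
	/* NOLINTNEXTLINE(clang-analyzer-core.NullDereference) */
	return *p;
}

/* Tool-agnostic: an assert states the invariant for every analyzer
 * (and for human readers); this is the approach the commit prefers. */
int
deref_checked(int *p) {
	assert(p != NULL);
	return *p;
}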

View File

@@ -325,6 +325,8 @@ imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
 	tcache_t *tcache = tsd_tcachep_get(tsd);
 	assert(tcache == tcache_get(tsd));
 	cache_bin_t *bin = &tcache->bins[ind];
+	/* Suppress spurious warning from static analysis */
+	assert(bin != NULL);
 	bool tcache_success;
 	void *ret;

View File

@@ -127,6 +127,7 @@ phn_merge_ordered(void *phn0, void *phn1, size_t offset,
 	phn0child = phn_lchild_get(phn0, offset);
 	phn_next_set(phn1, phn0child, offset);
 	if (phn0child != NULL) {
+		/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 		phn_prev_set(phn0child, phn1, offset);
 	}
 	phn_lchild_set(phn0, phn1, offset);
@@ -143,6 +144,7 @@ phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
 		phn_merge_ordered(phn0, phn1, offset, cmp);
 		result = phn0;
 	} else {
+		/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 		phn_merge_ordered(phn1, phn0, offset, cmp);
 		result = phn1;
 	}
@@ -188,10 +190,12 @@ phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
 			phn_prev_set(phn1, NULL, offset);
 			phn_next_set(phn1, NULL, offset);
 			phn0 = phn_merge(phn0, phn1, offset, cmp);
+			/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 			phn_next_set(tail, phn0, offset);
 			tail = phn0;
 			phn0 = phnrest;
 		} else {
+			/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 			phn_next_set(tail, phn0, offset);
 			tail = phn0;
 			phn0 = NULL;
@@ -210,6 +214,7 @@ phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
 		if (head == NULL) {
 			break;
 		}
+		/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 		phn_next_set(tail, phn0, offset);
 		tail = phn0;
 		phn0 = head;
@@ -298,6 +303,7 @@ ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) {
 	phn0 = phn_merge(phn0, phn1, offset, cmp);
 	phn_next_set(phn0, next_phn1, offset);
 	if (next_phn1 != NULL) {
+		/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 		phn_prev_set(next_phn1, phn0, offset);
 	}
 	phn_next_set(ph->root, phn0, offset);
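
For context (not part of the commit): clang-tidy's
readability-suspicious-call-argument check fires when an argument's
name closely matches the name of a different parameter of the callee,
suggesting the arguments may have been swapped. The phn_* calls above
are correct, hence the suppressions. A minimal reproduction with
invented names:

void phn_link(void *phn, void *prev);

void
demo(void *phn, void *prev) {
	/*
	 * Intentional here, but clang-tidy sees the argument named
	 * "prev" feeding the parameter named "phn" (and vice versa)
	 * and flags a possible accidental swap.
	 */
	/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
	phn_link(prev, phn);
}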

View File

@@ -867,6 +867,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
 		} \
 	} \
 	rb_remove_safety_checks(nodep, __func__); \
+	assert(nodep != NULL); \
 	assert(nodep->node == node); \
 	pathp--; \
 	if (pathp->node != node) { \

View File

@@ -268,6 +268,10 @@ rtree_contents_encode(rtree_contents_t contents, void **bits,
     unsigned *additional) {
 #ifdef RTREE_LEAF_COMPACT
 	*bits = (void *)rtree_leaf_elm_bits_encode(contents);
+	/* Suppress spurious warning from static analysis */
+	if (config_debug) {
+		*additional = 0;
+	}
 #else
 	*additional = (unsigned)contents.metadata.slab
 	    | ((unsigned)contents.metadata.is_head << 1)
@@ -299,7 +303,6 @@ rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
 	assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
 	void *bits;
 	unsigned additional;
-
 	rtree_contents_encode(contents, &bits, &additional);
 	rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
 }
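
The rtree change above works around a common analyzer blind spot: an
out-parameter written on only one side of an #ifdef looks as though it
may be read uninitialized by a tool that cannot correlate the matching
#ifdef in the caller. A standalone sketch of the idiom, with invented
names (config_debug is a compile-time constant in jemalloc, so the
extra store vanishes from release builds):

#include <stdbool.h>

static const bool config_debug = false;

static void
encode(int value, int *bits, int *extra) {
#ifdef COMPACT
	*bits = value;
	/*
	 * *extra is unused in this configuration; the store exists only
	 * so analyzers and debug builds never see it uninitialized.
	 */
	if (config_debug) {
		*extra = 0;
	}
#else
	*bits = value;
	*extra = value >> 1;
#endif
}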

View File

@@ -341,6 +341,9 @@ witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
 		witness_lock_error(witnesses, witness);
 	}
+	/* Suppress spurious warning from static analysis */
+	assert(ql_empty(witnesses) ||
+	    qr_prev(ql_first(witnesses), link) != NULL);
 	ql_elm_new(witness, link);
 	ql_tail_insert(witnesses, witness, link);
 }

View File

@@ -340,8 +340,9 @@ background_thread_create_signals_masked(pthread_t *thread,
 }

 static bool
-check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
-    bool *created_threads) {
+check_background_thread_creation(tsd_t *tsd,
+    const size_t const_max_background_threads,
+    unsigned *n_created, bool *created_threads) {
 	bool ret = false;
 	if (likely(*n_created == n_background_threads)) {
 		return ret;
@@ -349,7 +350,7 @@ check_background_thread_creation(tsd_t *tsd, unsigned *n_created,

 	tsdn_t *tsdn = tsd_tsdn(tsd);
 	malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
-	for (unsigned i = 1; i < max_background_threads; i++) {
+	for (unsigned i = 1; i < const_max_background_threads; i++) {
 		if (created_threads[i]) {
 			continue;
 		}
@@ -391,10 +392,19 @@ check_background_thread_creation(tsd_t *tsd, unsigned *n_created,

 static void
 background_thread0_work(tsd_t *tsd) {
-	/* Thread0 is also responsible for launching / terminating threads. */
-	VARIABLE_ARRAY(bool, created_threads, max_background_threads);
+	/*
+	 * Thread0 is also responsible for launching / terminating threads.
+	 * We are guaranteed that `max_background_threads` will not change
+	 * underneath us. Unfortunately static analysis tools do not understand
+	 * this, so we are extracting `max_background_threads` into a local
+	 * variable solely for the sake of exposing this information to such
+	 * tools.
+	 */
+	const size_t const_max_background_threads = max_background_threads;
+	assert(const_max_background_threads > 0);
+	VARIABLE_ARRAY(bool, created_threads, const_max_background_threads);
 	unsigned i;
-	for (i = 1; i < max_background_threads; i++) {
+	for (i = 1; i < const_max_background_threads; i++) {
 		created_threads[i] = false;
 	}
 	/* Start working, and create more threads when asked. */
@@ -404,8 +414,8 @@ background_thread0_work(tsd_t *tsd) {
 		    &background_thread_info[0])) {
 			continue;
 		}
-		if (check_background_thread_creation(tsd, &n_created,
-		    (bool *)&created_threads)) {
+		if (check_background_thread_creation(tsd, const_max_background_threads,
+		    &n_created, (bool *)&created_threads)) {
 			continue;
 		}
 		background_work_sleep_once(tsd_tsdn(tsd),
@@ -417,7 +427,7 @@ background_thread0_work(tsd_t *tsd) {
 	 * the global background_thread mutex (and is waiting) for us.
 	 */
 	assert(!background_thread_enabled());
-	for (i = 1; i < max_background_threads; i++) {
+	for (i = 1; i < const_max_background_threads; i++) {
 		background_thread_info_t *info = &background_thread_info[i];
 		assert(info->state != background_thread_paused);
 		if (created_threads[i]) {
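
A standalone distillation of the idiom above, with invented names:
copying the mutable global into a const-qualified local gives any
analyzer a bound it can prove is positive and unchanging across the
VLA declaration (what VARIABLE_ARRAY expands to in this configuration)
and every loop that indexes it.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

extern size_t max_items; /* mutable global; stable only by protocol */

void
process_all(void) {
	/*
	 * The const local cannot change between the array declaration
	 * and the loop, so the analyzer can prove every index is in
	 * bounds.
	 */
	const size_t n = max_items;
	assert(n > 0);
	bool seen[n];
	for (size_t i = 0; i < n; i++) {
		seen[i] = false;
	}
}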

View File

@@ -1314,9 +1314,18 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {

 static void
 ctl_refresh(tsdn_t *tsdn) {
-	unsigned i;
+	malloc_mutex_assert_owner(tsdn, &ctl_mtx);
+	/*
+	 * We are guaranteed that `ctl_arenas->narenas` will not change
+	 * underneath us since we hold `ctl_mtx` for the duration of this
+	 * function. Unfortunately static analysis tools do not understand this,
+	 * so we are extracting `narenas` into a local variable solely for the
+	 * sake of exposing this information to such tools.
+	 */
+	const unsigned narenas = ctl_arenas->narenas;
+	assert(narenas > 0);
 	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
-	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
+	VARIABLE_ARRAY(arena_t *, tarenas, narenas);

 	/*
 	 * Clear sum stats, since they will be merged into by
@@ -1324,11 +1333,11 @@
 	 * ctl_arena_refresh() for each arena.
 	 */
 	ctl_arena_clear(ctl_sarena);

-	for (i = 0; i < ctl_arenas->narenas; i++) {
+	for (unsigned i = 0; i < narenas; i++) {
 		tarenas[i] = arena_get(tsdn, i, false);
 	}

-	for (i = 0; i < ctl_arenas->narenas; i++) {
+	for (unsigned i = 0; i < narenas; i++) {
 		ctl_arena_t *ctl_arena = arenas_i(i);
 		bool initialized = (tarenas[i] != NULL);
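
The ctl_refresh hunk applies the same snapshot idiom as
background_thread0_work above: copy the shared value into a local while
its stability is guaranteed, in this case by holding ctl_mtx, which the
newly added malloc_mutex_assert_owner call documents.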

View File

@@ -157,6 +157,7 @@ decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
 uint64_t
 decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
 	uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
+	assert(decay_interval_ns != 0);
 	size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
 	uint64_t npages_purge;

View File

@@ -407,6 +407,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
 		edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
 		    expand_edata, EXTENT_PAI_PAC, ecache->state);
 		if (edata != NULL) {
+			/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
 			extent_assert_can_expand(expand_edata, edata);
 			if (edata_size_get(edata) < size) {
 				emap_release_edata(tsdn, pac->emap, edata,

View File

@@ -991,7 +991,8 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
 		 * Each source should only be read once, to minimize # of
 		 * syscalls on init.
 		 */
-		assert(read_source++ == which_source);
+		assert(read_source == which_source);
+		read_source++;
 	}
 	assert(which_source < MALLOC_CONF_NSOURCES);
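
This hunk fixes a genuine hazard rather than a purely cosmetic warning:
like the standard assert under NDEBUG, jemalloc's assert can compile to
nothing, and any side effect inside it disappears as well. A minimal
illustration with invented names:

#include <assert.h>

static unsigned step = 0;

void
advance(unsigned expected) {
	/*
	 * Broken: with assertions compiled out, the increment vanishes:
	 *     assert(step++ == expected);
	 * Safe: keep the side effect outside the assertion.
	 */
	assert(step == expected);
	step++;
}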

View File

@@ -1,5 +1,6 @@
 #include <mutex>
 #include <new>
+// NOLINTBEGIN(misc-use-anonymous-namespace)

 #define JEMALLOC_CPP_CPP_
 #ifdef __cplusplus
@@ -258,3 +259,4 @@ operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
 }
 #endif  // __cpp_aligned_new
+// NOLINTEND(misc-use-anonymous-namespace)
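
For context: clang-tidy's misc-use-anonymous-namespace check asks for
C++ unnamed namespaces in place of file-scope static declarations.
Because this file deliberately mirrors the C style of the rest of
jemalloc, the check is disabled for the whole file with a
NOLINTBEGIN/NOLINTEND pair rather than suppressed per declaration.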

View File

@@ -316,7 +316,8 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
 	if (alt_form) {
 		s -= 2;
 		(*slen_p) += 2;
-		memcpy(s, uppercase ? "0X" : "0x", 2);
+		s[0] = '0';
+		s[1] = uppercase ? 'X' : 'x';
 	}
 	return s;
 }