Fix remaining static analysis warnings

Fix or suppress the remaining warnings generated by static analysis.
This is a necessary step before we can incorporate static analysis into
CI. Where possible, I've preferred to modify the code itself instead of
just disabling the warning with a magic comment, so that if we decide to
use different static analysis tools in the future we will be protected
against similar warnings they might raise.
This commit is contained in:
Kevin Svetlitski
2023-05-12 13:17:52 -07:00
committed by Qi Wang
parent 210f0d0b2b
commit bb0333e745
12 changed files with 56 additions and 16 deletions

View File

@@ -340,8 +340,9 @@ background_thread_create_signals_masked(pthread_t *thread,
}
static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
bool *created_threads) {
check_background_thread_creation(tsd_t *tsd,
const size_t const_max_background_threads,
unsigned *n_created, bool *created_threads) {
bool ret = false;
if (likely(*n_created == n_background_threads)) {
return ret;
@@ -349,7 +350,7 @@ check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
tsdn_t *tsdn = tsd_tsdn(tsd);
malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
for (unsigned i = 1; i < max_background_threads; i++) {
for (unsigned i = 1; i < const_max_background_threads; i++) {
if (created_threads[i]) {
continue;
}
@@ -391,10 +392,19 @@ check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
static void
background_thread0_work(tsd_t *tsd) {
/* Thread0 is also responsible for launching / terminating threads. */
VARIABLE_ARRAY(bool, created_threads, max_background_threads);
/*
* Thread0 is also responsible for launching / terminating threads.
* We are guaranteed that `max_background_threads` will not change
* underneath us. Unfortunately static analysis tools do not understand
* this, so we are extracting `max_background_threads` into a local
* variable solely for the sake of exposing this information to such
* tools.
*/
const size_t const_max_background_threads = max_background_threads;
assert(const_max_background_threads > 0);
VARIABLE_ARRAY(bool, created_threads, const_max_background_threads);
unsigned i;
for (i = 1; i < max_background_threads; i++) {
for (i = 1; i < const_max_background_threads; i++) {
created_threads[i] = false;
}
/* Start working, and create more threads when asked. */
@@ -404,8 +414,8 @@ background_thread0_work(tsd_t *tsd) {
&background_thread_info[0])) {
continue;
}
if (check_background_thread_creation(tsd, &n_created,
(bool *)&created_threads)) {
if (check_background_thread_creation(tsd, const_max_background_threads,
&n_created, (bool *)&created_threads)) {
continue;
}
background_work_sleep_once(tsd_tsdn(tsd),
@@ -417,7 +427,7 @@ background_thread0_work(tsd_t *tsd) {
* the global background_thread mutex (and is waiting) for us.
*/
assert(!background_thread_enabled());
for (i = 1; i < max_background_threads; i++) {
for (i = 1; i < const_max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
assert(info->state != background_thread_paused);
if (created_threads[i]) {

View File

@@ -1314,9 +1314,18 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
static void
ctl_refresh(tsdn_t *tsdn) {
unsigned i;
malloc_mutex_assert_owner(tsdn, &ctl_mtx);
/*
* We are guaranteed that `ctl_arenas->narenas` will not change
* underneath us since we hold `ctl_mtx` for the duration of this
* function. Unfortunately static analysis tools do not understand this,
* so we are extracting `narenas` into a local variable solely for the
* sake of exposing this information to such tools.
*/
const unsigned narenas = ctl_arenas->narenas;
assert(narenas > 0);
ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
/*
* Clear sum stats, since they will be merged into by
@@ -1324,11 +1333,11 @@ ctl_refresh(tsdn_t *tsdn) {
*/
ctl_arena_clear(ctl_sarena);
for (i = 0; i < ctl_arenas->narenas; i++) {
for (unsigned i = 0; i < narenas; i++) {
tarenas[i] = arena_get(tsdn, i, false);
}
for (i = 0; i < ctl_arenas->narenas; i++) {
for (unsigned i = 0; i < narenas; i++) {
ctl_arena_t *ctl_arena = arenas_i(i);
bool initialized = (tarenas[i] != NULL);

View File

@@ -157,6 +157,7 @@ decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
uint64_t
decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
assert(decay_interval_ns != 0);
size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
uint64_t npages_purge;

View File

@@ -407,6 +407,7 @@ extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
expand_edata, EXTENT_PAI_PAC, ecache->state);
if (edata != NULL) {
/* NOLINTNEXTLINE(readability-suspicious-call-argument) */
extent_assert_can_expand(expand_edata, edata);
if (edata_size_get(edata) < size) {
emap_release_edata(tsdn, pac->emap, edata,

View File

@@ -991,7 +991,8 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
* Each source should only be read once, to minimize # of
* syscalls on init.
*/
assert(read_source++ == which_source);
assert(read_source == which_source);
read_source++;
}
assert(which_source < MALLOC_CONF_NSOURCES);

View File

@@ -1,5 +1,6 @@
#include <mutex>
#include <new>
// NOLINTBEGIN(misc-use-anonymous-namespace)
#define JEMALLOC_CPP_CPP_
#ifdef __cplusplus
@@ -258,3 +259,4 @@ operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexc
}
#endif // __cpp_aligned_new
// NOLINTEND(misc-use-anonymous-namespace)

View File

@@ -316,7 +316,8 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
if (alt_form) {
s -= 2;
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
s[0] = '0';
s[1] = uppercase ? 'X' : 'x';
}
return s;
}