3d29d11ac2
Before this commit, jemalloc produced many warnings when compiled with `-Wextra` under both Clang and GCC. This commit fixes the issues raised by these warnings, or suppresses the warnings where they were spurious, at least for the Clang and GCC versions covered by CI.

This commit:

* adds `JEMALLOC_DIAGNOSTIC` macros: `JEMALLOC_DIAGNOSTIC_{PUSH,POP}` modify the stack of enabled diagnostics, and the `JEMALLOC_DIAGNOSTIC_IGNORE_...` macros ignore a specific diagnostic (a sketch of how such macros are typically built, together with `JEMALLOC_FALLTHROUGH`, follows below).
* adds a `JEMALLOC_FALLTHROUGH` macro to state explicitly that falling through `case` labels in a `switch` statement is intended.
* removes all UNUSED annotations on function parameters. The warning `-Wunused-parameter` is now disabled globally in `jemalloc_internal_macros.h` for all translation units that include that header. It is never re-enabled, since that header cannot be included by users.
* locally suppresses some `-Wextra` diagnostics:
  * `-Wmissing-field-initializer` is buggy in older Clang and GCC versions, which do not understand that, in C, `= {0}` is a common idiom for zero-initializing a struct.
  * `-Wtype-bounds` is suppressed in one particular situation, where a generic macro used in several places compares an unsigned integer for being smaller than zero, a comparison whose result is known at compile time.
  * `-Walloc-larger-than-size=` diagnostics warn when an allocation function is called with a size that is too large (out of range). These are suppressed in the parts of the tests where jemalloc deliberately requests such sizes to verify that the allocation functions fail properly.
* adds a new CI build bot that runs the log unit test on CI.

Closes #1196.
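For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of how diagnostic push/pop/ignore and fallthrough macros of this kind are typically built on GCC and Clang. The macro names follow the commit message, but the bodies, the `example_flags_t` type, and the `describe()` function are illustrative assumptions, not the definitions this commit actually adds.

```c
/*
 * Illustrative sketch only: the macro names follow the commit message,
 * but these bodies and the example code below are assumptions for
 * demonstration, not jemalloc's actual definitions.
 */
#include <stdio.h>

#if defined(__GNUC__) || defined(__clang__)
#  define JEMALLOC_PRAGMA(x)           _Pragma(#x)
#  define JEMALLOC_DIAGNOSTIC_PUSH     JEMALLOC_PRAGMA(GCC diagnostic push)
#  define JEMALLOC_DIAGNOSTIC_POP      JEMALLOC_PRAGMA(GCC diagnostic pop)
#  define JEMALLOC_DIAGNOSTIC_IGNORE(w) JEMALLOC_PRAGMA(GCC diagnostic ignored w)
#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
	JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
#else
#  define JEMALLOC_DIAGNOSTIC_PUSH
#  define JEMALLOC_DIAGNOSTIC_POP
#  define JEMALLOC_DIAGNOSTIC_IGNORE(w)
#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
#endif

/* Tell the compiler that falling through a case label is intended. */
#if defined(__clang__) || (defined(__GNUC__) && __GNUC__ >= 7)
#  define JEMALLOC_FALLTHROUGH __attribute__((__fallthrough__))
#else
#  define JEMALLOC_FALLTHROUGH /* fall through */
#endif

/* Made-up struct, only here so the `= {0}` example has something to init. */
typedef struct {
	int verbose;
	int dump_on_exit;
	int log_level;
} example_flags_t;

static void
describe(int n) {
	switch (n) {
	case 0:
		printf("zero, which is also ");
		JEMALLOC_FALLTHROUGH;	/* intentionally shares case 1's output */
	case 1:
		printf("small\n");
		break;
	default:
		printf("large\n");
		break;
	}
}

int
main(void) {
	/*
	 * Zero-initialize with `= {0}`; on old compilers this spuriously
	 * trips -Wmissing-field-initializers, so the declaration is wrapped
	 * in a push/ignore/pop scope.
	 */
	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
	example_flags_t flags = {0};
	JEMALLOC_DIAGNOSTIC_POP

	describe(flags.verbose);
	describe(1);
	return 0;
}
```

The push/ignore/pop scoping keeps the suppression local to the one declaration that needs it, so the rest of the translation unit still gets the full `-Wextra` diagnostics.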
86 lines | 2.4 KiB | C
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"

static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
    uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing.  This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}

static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
    size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing.  Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
		    usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
	    0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}

JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active;
}

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
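For orientation, here is a sketch of how a caller might consume `prof_accum_add()`: when it returns true, the running byte count has wrapped past `prof_interval` (for example, with `prof_interval` of 1000 bytes, adding 46 bytes to a count of 990 returns true and leaves a residue of 36), which is the caller's cue to trigger an interval dump. The wrapper function and `trigger_interval_dump()` below are illustrative stand-ins, not jemalloc's actual call sites; `prof_active_get_unlocked()` is used here as the lock-free fast-path gate described in its comment.

```c
/*
 * Illustrative caller, not part of jemalloc: trigger_interval_dump() is a
 * placeholder for whatever action the real call site takes when
 * prof_accum_add() reports that an interval boundary was crossed.
 */
static void trigger_interval_dump(tsdn_t *tsdn);

static void
example_accum_and_maybe_dump(tsdn_t *tsdn, prof_accum_t *prof_accum,
    uint64_t accumbytes) {
	if (!prof_active_get_unlocked()) {
		/* Profiling (temporarily) inactive; this sketch skips accounting. */
		return;
	}
	if (prof_accum_add(tsdn, prof_accum, accumbytes)) {
		/* accumbytes wrapped past prof_interval; dump now. */
		trigger_interval_dump(tsdn);
	}
}
```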