Clean compilation -Wextra
Before this commit, jemalloc produced many warnings when compiled with -Wextra with both Clang and GCC. This commit fixes the issues raised by these warnings, or suppresses them if they were spurious, at least for the Clang and GCC versions covered by CI.

This commit:

* adds `JEMALLOC_DIAGNOSTIC` macros: `JEMALLOC_DIAGNOSTIC_{PUSH,POP}` are used to modify the stack of enabled diagnostics, and the `JEMALLOC_DIAGNOSTIC_IGNORE_...` macros are used to ignore a specific diagnostic.
* adds a `JEMALLOC_FALLTHROUGH` macro to state explicitly that falling through `case` labels in a `switch` statement is intended.
* removes all UNUSED annotations on function parameters. The warning `-Wunused-parameter` is now disabled globally in `jemalloc_internal_macros.h` for all translation units that include that header. It is never re-enabled, since that header cannot be included by users.
* locally suppresses some -Wextra diagnostics (a usage sketch follows this list):
  * `-Wmissing-field-initializers` is buggy in older Clang and GCC versions, which do not understand that, in C, `= {0}` is a common idiom for initializing a struct to zero.
  * `-Wtype-limits` is suppressed in a particular situation where a generic macro, used in multiple different places, checks whether an unsigned integer is smaller than zero, a comparison that is always false.
  * `-Walloc-size-larger-than=` diagnostics warn when an allocation function is called with a size that is too large (out of range). These are suppressed in the parts of the tests where jemalloc explicitly does this, to verify that the allocation functions fail properly.
* adds a new CI build bot that runs the log unit test.

Closes #1196.
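As an illustration of the push/ignore/pop pattern described above, here is a minimal sketch of the intended usage; only the `JEMALLOC_DIAGNOSTIC_*` names come from this commit, and `point_t` is a hypothetical struct for illustration:

```c
#include "jemalloc/internal/jemalloc_internal_macros.h"

typedef struct { int x; int y; } point_t;  /* hypothetical */

static point_t
origin(void) {
	/* `= {0}` zero-initializes every member, but older GCC/Clang
	 * versions report -Wmissing-field-initializers for it. */
	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
	point_t p = {0};
	JEMALLOC_DIAGNOSTIC_POP
	return p;
}
```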
@@ -49,7 +49,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
     alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
@@ -68,7 +68,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
 }
 
 static inline void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) {
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
@@ -318,7 +318,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 
 	szind_t szind;
 	bool slab;
-	UNUSED alloc_ctx_t local_ctx;
+	alloc_ctx_t local_ctx;
 	if (config_prof && opt_prof) {
 		if (alloc_ctx == NULL) {
 			/* Uncommon case and should be a static check. */
@@ -6,6 +6,8 @@
 #include "jemalloc/internal/mutex_prof.h"
 #include "jemalloc/internal/size_classes.h"
 
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+
 /*
  * In those architectures that support 64-bit atomics, we use atomic updates for
  * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
@@ -95,7 +97,7 @@ struct arena_stats_s {
 };
 
 static inline bool
-arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) {
+arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
 	if (config_debug) {
 		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
 			assert(((char *)arena_stats)[i] == 0);
@@ -147,11 +149,11 @@ arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
 #endif
 }
 
-UNUSED static inline void
+static inline void
 arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
     arena_stats_u64_t *p, uint64_t x) {
 #ifdef JEMALLOC_ATOMIC_U64
-	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+	uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
 	assert(r - x <= r);
 #else
 	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
@@ -176,7 +178,8 @@ arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
 }
 
 static inline size_t
-arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    atomic_zu_t *p) {
 #ifdef JEMALLOC_ATOMIC_U64
 	return atomic_load_zu(p, ATOMIC_RELAXED);
 #else
@@ -186,8 +189,8 @@ arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
 }
 
 static inline void
-arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
-    size_t x) {
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    atomic_zu_t *p, size_t x) {
 #ifdef JEMALLOC_ATOMIC_U64
 	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
 #else
@@ -198,10 +201,10 @@ arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
 }
 
 static inline void
-arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
-    size_t x) {
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
+    atomic_zu_t *p, size_t x) {
 #ifdef JEMALLOC_ATOMIC_U64
-	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+	size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
 	assert(r - x <= r);
 #else
 	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
@@ -233,5 +236,4 @@ arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
 	arena_stats_unlock(tsdn, arena_stats);
 }
 
-
 #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
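Placing `JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS` at the top of the header, as the first hunk above does, relies on a property of GCC/Clang diagnostic pragmas: an `ignored` pragma that is not bracketed by a matching push/pop stays in effect for the remainder of the translation unit. A minimal sketch of that mechanism (not code from the commit; `handler` is illustrative):

```c
/* some_internal_header.h (hypothetical) */
#pragma GCC diagnostic ignored "-Wunused-parameter"

/* Everything compiled after the #include of this header no longer
 * warns about unused parameters, even under -Wextra. */
int
handler(int used, int unused_arg) {
	return used * 2;
}
```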
@@ -113,8 +113,8 @@ atomic_store_##short_type(atomic_##short_type##_t *a, \
 } \
 \
 ATOMIC_INLINE type \
-atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
-    atomic_memory_order_t mo) { \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+    atomic_memory_order_t mo) { \
 	/* \
 	 * Because of FreeBSD, we care about gcc 4.2, which doesn't have \
 	 * an atomic exchange builtin.  We fake it with a CAS loop. \
@@ -129,8 +129,9 @@ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
 \
 ATOMIC_INLINE bool \
-atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
-    type *expected, type desired, atomic_memory_order_t success_mo, \
-    atomic_memory_order_t failure_mo) { \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+    type *expected, type desired, \
+    atomic_memory_order_t success_mo, \
+    atomic_memory_order_t failure_mo) { \
 	type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
 	    desired); \
 	if (prev == *expected) { \
@@ -142,8 +143,9 @@ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
 } \
 ATOMIC_INLINE bool \
-atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
-    type *expected, type desired, atomic_memory_order_t success_mo, \
-    atomic_memory_order_t failure_mo) { \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+    type *expected, type desired, \
+    atomic_memory_order_t success_mo, \
+    atomic_memory_order_t failure_mo) { \
 	type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
 	    desired); \
 	if (prev == *expected) { \
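The macro comment above mentions faking atomic exchange with a CAS loop because gcc 4.2 (kept alive for FreeBSD) has no exchange builtin. De-macroized for a single type, the generated code is roughly the following sketch; `exchange_int` is an illustrative name, not a jemalloc symbol:

```c
/* Retry the CAS until we successfully swap in `val`; the value
 * observed by the winning CAS is the old value we must return. */
static inline int
exchange_int(int *repr, int val) {
	for (;;) {
		int old = *repr;
		if (__sync_bool_compare_and_swap(repr, old, val)) {
			return old;
		}
	}
}
```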
@@ -190,7 +190,7 @@ extent_addr_set(extent_t *extent, void *addr) {
 }
 
 static inline void
-extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) {
+extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
 	assert(extent_base_get(extent) == extent_addr_get(extent));
 
 	if (alignment < PAGE) {
@@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
 	uint32_t k1 = 0;
 
 	switch (len & 3) {
-	case 3: k1 ^= tail[2] << 16;
-	case 2: k1 ^= tail[1] << 8;
+	case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
+	case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
 	case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
 		k1 *= c2; h1 ^= k1;
 	}
@@ -119,7 +119,7 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
 	return h1;
 }
 
-UNUSED static inline void
+static inline void
 hash_x86_128(const void *key, const int len, uint32_t seed,
     uint64_t r_out[2]) {
 	const uint8_t * data = (const uint8_t *) key;
@@ -177,28 +177,29 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 	uint32_t k4 = 0;
 
 	switch (len & 15) {
-	case 15: k4 ^= tail[14] << 16;
-	case 14: k4 ^= tail[13] << 8;
+	case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
+	case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
 	case 13: k4 ^= tail[12] << 0;
 		k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
-
-	case 12: k3 ^= tail[11] << 24;
-	case 11: k3 ^= tail[10] << 16;
-	case 10: k3 ^= tail[ 9] << 8;
+		JEMALLOC_FALLTHROUGH
+	case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
+	case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
+	case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
 	case  9: k3 ^= tail[ 8] << 0;
 		k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
-
-	case  8: k2 ^= tail[ 7] << 24;
-	case  7: k2 ^= tail[ 6] << 16;
-	case  6: k2 ^= tail[ 5] << 8;
+		JEMALLOC_FALLTHROUGH
+	case  8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
+	case  7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
+	case  6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
 	case  5: k2 ^= tail[ 4] << 0;
 		k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
-
-	case  4: k1 ^= tail[ 3] << 24;
-	case  3: k1 ^= tail[ 2] << 16;
-	case  2: k1 ^= tail[ 1] << 8;
+		JEMALLOC_FALLTHROUGH
+	case  4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
+	case  3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
+	case  2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
 	case  1: k1 ^= tail[ 0] << 0;
 		k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
+		JEMALLOC_FALLTHROUGH
 	}
 }
 
@@ -220,7 +221,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 	r_out[1] = (((uint64_t) h4) << 32) | h3;
 }
 
-UNUSED static inline void
+static inline void
 hash_x64_128(const void *key, const int len, const uint32_t seed,
     uint64_t r_out[2]) {
 	const uint8_t *data = (const uint8_t *) key;
@@ -260,22 +261,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 	uint64_t k2 = 0;
 
 	switch (len & 15) {
-	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */
-	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */
-	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */
-	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */
-	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */
-	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */
+	case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
+	case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
+	case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
+	case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
+	case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
+	case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH
 	case  9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
 		k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
-		/* falls through */
-	case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */
-	case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */
-	case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */
-	case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */
-	case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */
-	case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */
-	case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */
+		JEMALLOC_FALLTHROUGH
+	case  8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
+	case  7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
+	case  6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH
+	case  5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH
+	case  4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
+	case  3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
+	case  2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH
 	case  1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
 		k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
 	}
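For context on the hunks above: GCC 7 added -Wimplicit-fallthrough (enabled by -Wextra), which warns whenever control flows from one `case` into the next without an annotation. Both the fallthrough attribute and certain comments such as `/* falls through */` are accepted as annotations; `JEMALLOC_FALLTHROUGH`, defined in the `jemalloc_internal_macros.h` hunk below, picks between the two forms depending on the compiler. A standalone sketch with a hypothetical `tail_mix` function:

```c
int
tail_mix(const unsigned char *tail, int len) {
	int k = 0;
	switch (len & 3) {
	case 3: k ^= tail[2] << 16; __attribute__((fallthrough));
	case 2: k ^= tail[1] << 8;
		/* falls through */
	case 1: k ^= tail[0];
	}
	return k;
}
```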
@@ -40,4 +40,62 @@
 #define JEMALLOC_VA_ARGS_HEAD(head, ...) head
 #define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
 
+#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
+     && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
+#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
+#else
+#define JEMALLOC_FALLTHROUGH /* falls through */
+#endif
+
+
+/* Diagnostic suppression macros */
+#if defined(_MSC_VER) && !defined(__clang__)
+#  define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
+#  define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
+#  define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+#elif defined(__GNUC__) || defined(__clang__)
+/*
+ * The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
+ * diagnostic suppression macros and should not be used anywhere else.
+ */
+#  define JEMALLOC_PRAGMA__(X) _Pragma(#X)
+#  define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
+#  define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
+#  define JEMALLOC_DIAGNOSTIC_IGNORE(W) JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
+     JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
+     JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
+     JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
+#  if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
+#    define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
+       JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
+#  else
+#    define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+#  endif
+#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
+     JEMALLOC_DIAGNOSTIC_PUSH \
+     JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
+#else
+#  define JEMALLOC_DIAGNOSTIC_PUSH
+#  define JEMALLOC_DIAGNOSTIC_POP
+#  define JEMALLOC_DIAGNOSTIC_IGNORE(W)
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
+#  define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
+#  define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+#endif
+
+/*
+ * Disables spurious diagnostics for all headers.
+ * Since these headers are not included by users directly,
+ * it does not affect their diagnostic settings.
+ */
+JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
+
 #endif /* JEMALLOC_INTERNAL_MACROS_H */
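The `-Wtype-limits` suppression pairs with generic macros whose expansion is only tautological for some argument types. A hypothetical sketch of how the new macros bracket such a site (`IN_RANGE` and `index_ok` are illustrative, not jemalloc code):

```c
#include <stdbool.h>

#define IN_RANGE(x, lo, hi) ((x) >= (lo) && (x) <= (hi))

static bool
index_ok(unsigned idx, unsigned max) {
	/* With an unsigned `idx`, `idx >= 0` is always true; GCC's
	 * -Wtype-limits reports this even though the macro is correct
	 * for signed instantiations. */
	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
	bool ok = IN_RANGE(idx, 0, max);
	JEMALLOC_DIAGNOSTIC_POP
	return ok;
}
```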
@@ -101,9 +101,15 @@ struct malloc_mutex_s {
 #ifdef _WIN32
 #  define MALLOC_MUTEX_INITIALIZER
 #elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
-#  define MALLOC_MUTEX_INITIALIZER \
+#  if defined(JEMALLOC_DEBUG)
+#    define MALLOC_MUTEX_INITIALIZER \
+      {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
+      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
+#  else
+#    define MALLOC_MUTEX_INITIALIZER \
       {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
       WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#  endif
 #elif (defined(JEMALLOC_OSSPIN))
 #  define MALLOC_MUTEX_INITIALIZER \
       {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
@@ -111,12 +117,18 @@ struct malloc_mutex_s {
 #elif (defined(JEMALLOC_MUTEX_INIT_CB))
 #  define MALLOC_MUTEX_INITIALIZER \
       {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \
-      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+       WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
 #else
 #  define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
+#  if defined(JEMALLOC_DEBUG)
+#    define MALLOC_MUTEX_INITIALIZER \
+      {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
+      WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
+#  else
 #  define MALLOC_MUTEX_INITIALIZER \
       {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
       WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
+#  endif
 #endif
 
 #ifdef JEMALLOC_LAZY_LOCK
@@ -4,7 +4,8 @@
 #include "jemalloc/internal/mutex.h"
 
 static inline bool
-prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
+prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
+    uint64_t accumbytes) {
 	cassert(config_prof);
 
 	bool overflow;
@@ -42,7 +43,8 @@ prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
 }
 
 static inline void
-prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
+prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
+    size_t usize) {
 	cassert(config_prof);
 
 	/*
@@ -170,8 +170,8 @@ rtree_subkey(uintptr_t key, unsigned level) {
  */
 #  ifdef RTREE_LEAF_COMPACT
 JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
-    bool dependent) {
+rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_leaf_elm_t *elm, bool dependent) {
 	return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
 	    ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
 }
@@ -208,7 +208,7 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
 #  endif
 
 JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, bool dependent) {
 #ifdef RTREE_LEAF_COMPACT
 	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
@@ -221,7 +221,7 @@ rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, bool dependent) {
 #ifdef RTREE_LEAF_COMPACT
 	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
@@ -233,7 +233,7 @@ rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
 }
 
 JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, bool dependent) {
 #ifdef RTREE_LEAF_COMPACT
 	uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
@@ -245,7 +245,7 @@ rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
 }
 
 static inline void
-rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, extent_t *extent) {
 #ifdef RTREE_LEAF_COMPACT
 	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
@@ -259,7 +259,7 @@ rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
 }
 
 static inline void
-rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, szind_t szind) {
 	assert(szind <= NSIZES);
 
@@ -277,7 +277,7 @@ rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
 }
 
 static inline void
-rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
+rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
     rtree_leaf_elm_t *elm, bool slab) {
 #ifdef RTREE_LEAF_COMPACT
 	uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
@@ -292,8 +292,8 @@ rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
 }
 
 static inline void
-rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
-    extent_t *extent, szind_t szind, bool slab) {
+rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
+    rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
 #ifdef RTREE_LEAF_COMPACT
 	uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
 	    ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
@@ -26,7 +26,7 @@
  * Zero initializer required for tsd initialization only. Proper initialization
  * done via rtree_ctx_data_init().
  */
-#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}}
+#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
 
 
 typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
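The `{{{0}}, {{0}}}` to `{{{0, 0}}, {{0, 0}}}` change above takes the other route around the same buggy diagnostic: instead of suppressing the warning, it spells out every member. The general pattern, sketched on a hypothetical two-member struct:

```c
#include <stddef.h>

typedef struct { void *ptr; size_t len; } buf_t;  /* hypothetical */

/* Both forms fully zero-initialize the struct per C semantics, but
 * older GCC/Clang emit -Wmissing-field-initializers only for the
 * first one. */
static buf_t partial_init = {0};
static buf_t spelled_out  = {0, 0};
```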
@@ -40,7 +40,7 @@ tcache_event(tsd_t *tsd, tcache_t *tcache) {
 
 JEMALLOC_ALWAYS_INLINE void *
 tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    UNUSED size_t size, szind_t binind, bool zero, bool slow_path) {
+    size_t size, szind_t binind, bool zero, bool slow_path) {
 	void *ret;
 	cache_bin_t *bin;
 	bool tcache_success;
@@ -77,7 +77,10 @@ tsd_wrapper_get(bool init) {
 			abort();
 		} else {
 			wrapper->initialized = false;
+			JEMALLOC_DIAGNOSTIC_PUSH
+			JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
 			tsd_t initializer = TSD_INITIALIZER;
+			JEMALLOC_DIAGNOSTIC_POP
 			wrapper->val = initializer;
 		}
 		tsd_wrapper_set(wrapper);
@@ -107,7 +110,10 @@ tsd_boot1(void) {
 	tsd_boot_wrapper.initialized = false;
 	tsd_cleanup(&tsd_boot_wrapper.val);
 	wrapper->initialized = false;
+	JEMALLOC_DIAGNOSTIC_PUSH
+	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
 	tsd_t initializer = TSD_INITIALIZER;
+	JEMALLOC_DIAGNOSTIC_POP
 	wrapper->val = initializer;
 	tsd_wrapper_set(wrapper);
 }
@@ -39,7 +39,7 @@ tsd_get_allocates(void) {
 
 /* Get/set. */
 JEMALLOC_ALWAYS_INLINE tsd_t *
-tsd_get(UNUSED bool init) {
+tsd_get(bool init) {
 	assert(tsd_booted);
 	return &tsd_tls;
 }