Clean compilation -Wextra

Before this commit, jemalloc produced many warnings when compiled with -Wextra
with both Clang and GCC. This commit fixes the issues raised by these warnings,
or suppresses them if they were spurious — at least for the Clang and GCC
versions covered by CI.

This commit:

* adds `JEMALLOC_DIAGNOSTIC` macros: `JEMALLOC_DIAGNOSTIC_{PUSH,POP}` are
  used to modify the stack of enabled diagnostics. The
  `JEMALLOC_DIAGNOSTIC_IGNORE_...` macros are used to ignore a concrete
  diagnostic.

* adds a `JEMALLOC_FALLTHROUGH` macro to explicitly state that falling
  through `case` labels in a `switch` statement is intended.

* removes all UNUSED annotations on function parameters. The warning
  -Wunused-parameter is now disabled globally in
  `jemalloc_internal_macros.h` for all translation units that include
  that header. It is never re-enabled since that header cannot be
  included by users.

* locally suppresses some -Wextra diagnostics:

  * `-Wmissing-field-initializers` is buggy in older Clang and GCC versions,
    where it does not understand that, in C, `= {0}` is a common idiom used
    to initialize a struct to zero

  * `-Wtype-limits` is suppressed in a particular situation where a generic
    macro, used in multiple different places, compares an unsigned integer for
    being smaller than zero, which is always false.

  * `-Walloc-size-larger-than=` diagnostics warn when an allocation function is
    called with a size that is too large (out-of-range). These are suppressed in
    the parts of the tests where `jemalloc` explicitly does this to test that the
    allocation functions fail properly.

* adds a new CI build bot that runs the log unit test on CI.

Closes #1196 .
This commit is contained in:
gnzlbg 2018-05-03 11:40:53 +02:00 committed by David Goldblatt
parent ce5c073fe5
commit 3d29d11ac2
29 changed files with 328 additions and 147 deletions

View File

@ -143,7 +143,12 @@ matrix:
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=dss:primary,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux - os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds" env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=percpu_arena:percpu,background_thread:true" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
- os: linux
env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
script:
- make check
- make -j test/unit/log
- test/unit/log
before_script: before_script:
- autoconf - autoconf

View File

@ -242,6 +242,7 @@ if test "x$GCC" = "xyes" ; then
fi fi
fi fi
JE_CFLAGS_ADD([-Wall]) JE_CFLAGS_ADD([-Wall])
JE_CFLAGS_ADD([-Wextra])
JE_CFLAGS_ADD([-Wshorten-64-to-32]) JE_CFLAGS_ADD([-Wshorten-64-to-32])
JE_CFLAGS_ADD([-Wsign-compare]) JE_CFLAGS_ADD([-Wsign-compare])
JE_CFLAGS_ADD([-Wundef]) JE_CFLAGS_ADD([-Wundef])
@ -289,6 +290,7 @@ if test "x$enable_cxx" = "x1" ; then
AX_CXX_COMPILE_STDCXX([14], [noext], [optional]) AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
if test "x${HAVE_CXX14}" = "x1" ; then if test "x${HAVE_CXX14}" = "x1" ; then
JE_CXXFLAGS_ADD([-Wall]) JE_CXXFLAGS_ADD([-Wall])
JE_CXXFLAGS_ADD([-Wextra])
JE_CXXFLAGS_ADD([-g3]) JE_CXXFLAGS_ADD([-g3])
SAVED_LIBS="${LIBS}" SAVED_LIBS="${LIBS}"

View File

@ -49,7 +49,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
} }
JEMALLOC_ALWAYS_INLINE void JEMALLOC_ALWAYS_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize, arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
@ -68,7 +68,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, UNUSED size_t usize,
} }
static inline void static inline void
arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, UNUSED prof_tctx_t *tctx) { arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
cassert(config_prof); cassert(config_prof);
assert(ptr != NULL); assert(ptr != NULL);
@ -318,7 +318,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
szind_t szind; szind_t szind;
bool slab; bool slab;
UNUSED alloc_ctx_t local_ctx; alloc_ctx_t local_ctx;
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
if (alloc_ctx == NULL) { if (alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */ /* Uncommon case and should be a static check. */

View File

@ -6,6 +6,8 @@
#include "jemalloc/internal/mutex_prof.h" #include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/size_classes.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/* /*
* In those architectures that support 64-bit atomics, we use atomic updates for * In those architectures that support 64-bit atomics, we use atomic updates for
* our 64-bit values. Otherwise, we use a plain uint64_t and synchronize * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
@ -95,7 +97,7 @@ struct arena_stats_s {
}; };
static inline bool static inline bool
arena_stats_init(UNUSED tsdn_t *tsdn, arena_stats_t *arena_stats) { arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
if (config_debug) { if (config_debug) {
for (size_t i = 0; i < sizeof(arena_stats_t); i++) { for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
assert(((char *)arena_stats)[i] == 0); assert(((char *)arena_stats)[i] == 0);
@ -147,11 +149,11 @@ arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
#endif #endif
} }
UNUSED static inline void static inline void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
arena_stats_u64_t *p, uint64_t x) { arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64 #ifdef JEMALLOC_ATOMIC_U64
UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
assert(r - x <= r); assert(r - x <= r);
#else #else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
@ -176,7 +178,8 @@ arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
} }
static inline size_t static inline size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64 #ifdef JEMALLOC_ATOMIC_U64
return atomic_load_zu(p, ATOMIC_RELAXED); return atomic_load_zu(p, ATOMIC_RELAXED);
#else #else
@ -186,8 +189,8 @@ arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
} }
static inline void static inline void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
size_t x) { atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64 #ifdef JEMALLOC_ATOMIC_U64
atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else #else
@ -198,10 +201,10 @@ arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
} }
static inline void static inline void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
size_t x) { atomic_zu_t *p, size_t x) {
#ifdef JEMALLOC_ATOMIC_U64 #ifdef JEMALLOC_ATOMIC_U64
UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
assert(r - x <= r); assert(r - x <= r);
#else #else
malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
@ -233,5 +236,4 @@ arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
arena_stats_unlock(tsdn, arena_stats); arena_stats_unlock(tsdn, arena_stats);
} }
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */ #endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */

View File

@ -129,7 +129,8 @@ atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
\ \
ATOMIC_INLINE bool \ ATOMIC_INLINE bool \
atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \ type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \ atomic_memory_order_t failure_mo) { \
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
desired); \ desired); \
@ -142,7 +143,8 @@ atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
} \ } \
ATOMIC_INLINE bool \ ATOMIC_INLINE bool \
atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
type *expected, type desired, atomic_memory_order_t success_mo, \ type *expected, type desired, \
atomic_memory_order_t success_mo, \
atomic_memory_order_t failure_mo) { \ atomic_memory_order_t failure_mo) { \
type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ type prev = __sync_val_compare_and_swap(&a->repr, *expected, \
desired); \ desired); \

View File

@ -190,7 +190,7 @@ extent_addr_set(extent_t *extent, void *addr) {
} }
static inline void static inline void
extent_addr_randomize(UNUSED tsdn_t *tsdn, extent_t *extent, size_t alignment) { extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
assert(extent_base_get(extent) == extent_addr_get(extent)); assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) { if (alignment < PAGE) {

View File

@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
uint32_t k1 = 0; uint32_t k1 = 0;
switch (len & 3) { switch (len & 3) {
case 3: k1 ^= tail[2] << 16; case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
case 2: k1 ^= tail[1] << 8; case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1; k1 *= c2; h1 ^= k1;
} }
@ -119,7 +119,7 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
return h1; return h1;
} }
UNUSED static inline void static inline void
hash_x86_128(const void *key, const int len, uint32_t seed, hash_x86_128(const void *key, const int len, uint32_t seed,
uint64_t r_out[2]) { uint64_t r_out[2]) {
const uint8_t * data = (const uint8_t *) key; const uint8_t * data = (const uint8_t *) key;
@ -177,28 +177,29 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
uint32_t k4 = 0; uint32_t k4 = 0;
switch (len & 15) { switch (len & 15) {
case 15: k4 ^= tail[14] << 16; case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
case 14: k4 ^= tail[13] << 8; case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
case 13: k4 ^= tail[12] << 0; case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
JEMALLOC_FALLTHROUGH
case 12: k3 ^= tail[11] << 24; case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
case 11: k3 ^= tail[10] << 16; case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
case 10: k3 ^= tail[ 9] << 8; case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
case 9: k3 ^= tail[ 8] << 0; case 9: k3 ^= tail[ 8] << 0;
k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
JEMALLOC_FALLTHROUGH
case 8: k2 ^= tail[ 7] << 24; case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
case 7: k2 ^= tail[ 6] << 16; case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
case 6: k2 ^= tail[ 5] << 8; case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
case 5: k2 ^= tail[ 4] << 0; case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
JEMALLOC_FALLTHROUGH
case 4: k1 ^= tail[ 3] << 24; case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
case 3: k1 ^= tail[ 2] << 16; case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
case 2: k1 ^= tail[ 1] << 8; case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
case 1: k1 ^= tail[ 0] << 0; case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
JEMALLOC_FALLTHROUGH
} }
} }
@ -220,7 +221,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
r_out[1] = (((uint64_t) h4) << 32) | h3; r_out[1] = (((uint64_t) h4) << 32) | h3;
} }
UNUSED static inline void static inline void
hash_x64_128(const void *key, const int len, const uint32_t seed, hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t r_out[2]) { uint64_t r_out[2]) {
const uint8_t *data = (const uint8_t *) key; const uint8_t *data = (const uint8_t *) key;
@ -260,22 +261,22 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t k2 = 0; uint64_t k2 = 0;
switch (len & 15) { switch (len & 15) {
case 15: k2 ^= ((uint64_t)(tail[14])) << 48; /* falls through */ case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
case 14: k2 ^= ((uint64_t)(tail[13])) << 40; /* falls through */ case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
case 13: k2 ^= ((uint64_t)(tail[12])) << 32; /* falls through */ case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
case 12: k2 ^= ((uint64_t)(tail[11])) << 24; /* falls through */ case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
case 11: k2 ^= ((uint64_t)(tail[10])) << 16; /* falls through */ case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; /* falls through */ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
/* falls through */ JEMALLOC_FALLTHROUGH
case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; /* falls through */ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; /* falls through */ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; /* falls through */ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH
case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; /* falls through */ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH
case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; /* falls through */ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; /* falls through */ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; /* falls through */ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
} }

View File

@ -40,4 +40,62 @@
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head #define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__ #define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
&& defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
#else
#define JEMALLOC_FALLTHROUGH /* falls through */
#endif
/* Diagnostic suppression macros */
#if defined(_MSC_VER) && !defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
# define JEMALLOC_DIAGNOSTIC_POP __pragma(warning(pop))
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) __pragma(warning(disable:W))
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
#elif defined(__GNUC__) || defined(__clang__)
/*
* The JEMALLOC_PRAGMA__ macro is an implementation detail of the GCC and Clang
* diagnostic suppression macros and should not be used anywhere else.
*/
# define JEMALLOC_PRAGMA__(X) _Pragma(#X)
# define JEMALLOC_DIAGNOSTIC_PUSH JEMALLOC_PRAGMA__(GCC diagnostic push)
# define JEMALLOC_DIAGNOSTIC_POP JEMALLOC_PRAGMA__(GCC diagnostic pop)
# define JEMALLOC_DIAGNOSTIC_IGNORE(W) JEMALLOC_PRAGMA__(GCC diagnostic ignored W)
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wmissing-field-initializers")
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wtype-limits")
# define JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER \
JEMALLOC_DIAGNOSTIC_IGNORE("-Wunused-parameter")
# if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ >= 7)
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN \
JEMALLOC_DIAGNOSTIC_IGNORE("-Walloc-size-larger-than=")
# else
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# endif
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS \
JEMALLOC_DIAGNOSTIC_PUSH \
JEMALLOC_DIAGNOSTIC_IGNORE_UNUSED_PARAMETER
#else
# define JEMALLOC_DIAGNOSTIC_PUSH
# define JEMALLOC_DIAGNOSTIC_POP
# define JEMALLOC_DIAGNOSTIC_IGNORE(W)
# define JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
# define JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
# define JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
# define JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
#endif
/*
* Disables spurious diagnostics for all headers
* Since these headers are not included by users directly,
* it does not affect their diagnostic settings.
*/
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
#endif /* JEMALLOC_INTERNAL_MACROS_H */ #endif /* JEMALLOC_INTERNAL_MACROS_H */

View File

@ -101,9 +101,15 @@ struct malloc_mutex_s {
#ifdef _WIN32 #ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER # define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) #elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \ # define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif
#elif (defined(JEMALLOC_OSSPIN)) #elif (defined(JEMALLOC_OSSPIN))
# define MALLOC_MUTEX_INITIALIZER \ # define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, 0}}, \ {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \
@ -114,10 +120,16 @@ struct malloc_mutex_s {
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
#else #else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
# if defined(JEMALLOC_DEBUG)
# define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
# else
# define MALLOC_MUTEX_INITIALIZER \ # define MALLOC_MUTEX_INITIALIZER \
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
# endif # endif
#endif
#ifdef JEMALLOC_LAZY_LOCK #ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded; extern bool isthreaded;

View File

@ -4,7 +4,8 @@
#include "jemalloc/internal/mutex.h" #include "jemalloc/internal/mutex.h"
static inline bool static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
uint64_t accumbytes) {
cassert(config_prof); cassert(config_prof);
bool overflow; bool overflow;
@ -42,7 +43,8 @@ prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
} }
static inline void static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) { prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
size_t usize) {
cassert(config_prof); cassert(config_prof);
/* /*

View File

@ -170,8 +170,8 @@ rtree_subkey(uintptr_t key, unsigned level) {
*/ */
# ifdef RTREE_LEAF_COMPACT # ifdef RTREE_LEAF_COMPACT
JEMALLOC_ALWAYS_INLINE uintptr_t JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
bool dependent) { rtree_leaf_elm_t *elm, bool dependent) {
return (uintptr_t)atomic_load_p(&elm->le_bits, dependent return (uintptr_t)atomic_load_p(&elm->le_bits, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE); ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
} }
@ -208,7 +208,7 @@ rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
# endif # endif
JEMALLOC_ALWAYS_INLINE extent_t * JEMALLOC_ALWAYS_INLINE extent_t *
rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) { rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT #ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
@ -221,7 +221,7 @@ rtree_leaf_elm_extent_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
} }
JEMALLOC_ALWAYS_INLINE szind_t JEMALLOC_ALWAYS_INLINE szind_t
rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) { rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT #ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
@ -233,7 +233,7 @@ rtree_leaf_elm_szind_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
} }
JEMALLOC_ALWAYS_INLINE bool JEMALLOC_ALWAYS_INLINE bool
rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool dependent) { rtree_leaf_elm_t *elm, bool dependent) {
#ifdef RTREE_LEAF_COMPACT #ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
@ -245,7 +245,7 @@ rtree_leaf_elm_slab_read(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
} }
static inline void static inline void
rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, extent_t *extent) { rtree_leaf_elm_t *elm, extent_t *extent) {
#ifdef RTREE_LEAF_COMPACT #ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
@ -259,7 +259,7 @@ rtree_leaf_elm_extent_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
} }
static inline void static inline void
rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, szind_t szind) { rtree_leaf_elm_t *elm, szind_t szind) {
assert(szind <= NSIZES); assert(szind <= NSIZES);
@ -277,7 +277,7 @@ rtree_leaf_elm_szind_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
} }
static inline void static inline void
rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree, rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, bool slab) { rtree_leaf_elm_t *elm, bool slab) {
#ifdef RTREE_LEAF_COMPACT #ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
@ -292,8 +292,8 @@ rtree_leaf_elm_slab_write(UNUSED tsdn_t *tsdn, UNUSED rtree_t *rtree,
} }
static inline void static inline void
rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
extent_t *extent, szind_t szind, bool slab) { rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
#ifdef RTREE_LEAF_COMPACT #ifdef RTREE_LEAF_COMPACT
uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |

View File

@ -26,7 +26,7 @@
* Zero initializer required for tsd initialization only. Proper initialization * Zero initializer required for tsd initialization only. Proper initialization
* done via rtree_ctx_data_init(). * done via rtree_ctx_data_init().
*/ */
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}, {{0}}} #define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;

View File

@ -40,7 +40,7 @@ tcache_event(tsd_t *tsd, tcache_t *tcache) {
JEMALLOC_ALWAYS_INLINE void * JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
UNUSED size_t size, szind_t binind, bool zero, bool slow_path) { size_t size, szind_t binind, bool zero, bool slow_path) {
void *ret; void *ret;
cache_bin_t *bin; cache_bin_t *bin;
bool tcache_success; bool tcache_success;

View File

@ -77,7 +77,10 @@ tsd_wrapper_get(bool init) {
abort(); abort();
} else { } else {
wrapper->initialized = false; wrapper->initialized = false;
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
tsd_t initializer = TSD_INITIALIZER; tsd_t initializer = TSD_INITIALIZER;
JEMALLOC_DIAGNOSTIC_POP
wrapper->val = initializer; wrapper->val = initializer;
} }
tsd_wrapper_set(wrapper); tsd_wrapper_set(wrapper);
@ -107,7 +110,10 @@ tsd_boot1(void) {
tsd_boot_wrapper.initialized = false; tsd_boot_wrapper.initialized = false;
tsd_cleanup(&tsd_boot_wrapper.val); tsd_cleanup(&tsd_boot_wrapper.val);
wrapper->initialized = false; wrapper->initialized = false;
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
tsd_t initializer = TSD_INITIALIZER; tsd_t initializer = TSD_INITIALIZER;
JEMALLOC_DIAGNOSTIC_POP
wrapper->val = initializer; wrapper->val = initializer;
tsd_wrapper_set(wrapper); tsd_wrapper_set(wrapper);
} }

View File

@ -39,7 +39,7 @@ tsd_get_allocates(void) {
/* Get/set. */ /* Get/set. */
JEMALLOC_ALWAYS_INLINE tsd_t * JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_get(UNUSED bool init) { tsd_get(bool init) {
assert(tsd_booted); assert(tsd_booted);
return &tsd_tls; return &tsd_tls;
} }

View File

@ -11,6 +11,8 @@
#include "jemalloc/internal/size_classes.h" #include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h" #include "jemalloc/internal/util.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
@ -65,7 +67,7 @@ static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
/******************************************************************************/ /******************************************************************************/
void void
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy) { size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nthreads += arena_nthreads_get(arena, false); *nthreads += arena_nthreads_get(arena, false);
@ -752,7 +754,7 @@ static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
bool all, extent_list_t *decay_extents, bool is_background_thread) { bool all, extent_list_t *decay_extents, bool is_background_thread) {
UNUSED size_t nmadvise, nunmapped; size_t nmadvise, nunmapped;
size_t npurged; size_t npurged;
if (config_stats) { if (config_stats) {
@ -843,7 +845,7 @@ arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
npages_limit, npages_decay_max, &decay_extents); npages_limit, npages_decay_max, &decay_extents);
if (npurge != 0) { if (npurge != 0) {
UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, size_t npurged = arena_decay_stashed(tsdn, arena,
&extent_hooks, decay, extents, all, &decay_extents, &extent_hooks, decay, extents, all, &decay_extents,
is_background_thread); is_background_thread);
assert(npurged == npurge); assert(npurged == npurge);
@ -872,7 +874,7 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
is_background_thread); is_background_thread);
UNUSED size_t npages_new; size_t npages_new;
if (epoch_advanced) { if (epoch_advanced) {
/* Backlog is updated on epoch advance. */ /* Backlog is updated on epoch advance. */
npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
@ -1508,7 +1510,7 @@ arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
} }
static void static void
arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab, arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
bin_t *bin) { bin_t *bin) {
assert(extent_nfree_get(slab) > 0); assert(extent_nfree_get(slab) > 0);

View File

@ -4,6 +4,8 @@
#include "jemalloc/internal/assert.h" #include "jemalloc/internal/assert.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
@ -78,7 +80,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
} }
static inline bool static inline bool
set_current_thread_affinity(UNUSED int cpu) { set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) #if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset; cpu_set_t cpuset;
CPU_ZERO(&cpuset); CPU_ZERO(&cpuset);

136
src/ctl.c
View File

@ -1392,8 +1392,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \ #define CTL_RO_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
@ -1435,8 +1435,8 @@ label_return: \
*/ */
#define CTL_RO_NL_CGEN(c, n, v, t) \ #define CTL_RO_NL_CGEN(c, n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
@ -1454,8 +1454,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \ #define CTL_RO_NL_GEN(n, v, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
@ -1489,8 +1489,8 @@ label_return: \
#define CTL_RO_CONFIG_GEN(n, t) \ #define CTL_RO_CONFIG_GEN(n, t) \
static int \ static int \
n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
size_t *oldlenp, void *newp, size_t newlen) { \ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \ int ret; \
t oldval; \ t oldval; \
\ \
@ -1508,8 +1508,8 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int static int
epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
UNUSED uint64_t newval; UNUSED uint64_t newval;
@ -1527,8 +1527,9 @@ label_return:
} }
static int static int
background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, background_thread_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
@ -1578,8 +1579,9 @@ label_return:
} }
static int static int
max_background_threads_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
size_t oldval; size_t oldval;
@ -1691,8 +1693,8 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/ /******************************************************************************/
static int static int
thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
arena_t *oldarena; arena_t *oldarena;
unsigned newind, oldind; unsigned newind, oldind;
@ -1756,8 +1758,9 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *) tsd_thread_deallocatedp_get, uint64_t *)
static int static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
@ -1777,8 +1780,9 @@ label_return:
} }
static int static int
thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
if (!tcache_available(tsd)) { if (!tcache_available(tsd)) {
@ -1797,8 +1801,9 @@ label_return:
} }
static int static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
if (!config_prof) { if (!config_prof) {
@ -1828,8 +1833,9 @@ label_return:
} }
static int static int
thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
@ -1858,8 +1864,8 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned tcache_ind; unsigned tcache_ind;
@ -1876,8 +1882,8 @@ label_return:
} }
static int static int
tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned tcache_ind; unsigned tcache_ind;
@ -1896,8 +1902,8 @@ label_return:
} }
static int static int
tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned tcache_ind; unsigned tcache_ind;
@ -2299,8 +2305,9 @@ label_return:
} }
static int static int
arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
unsigned arena_ind; unsigned arena_ind;
arena_t *arena; arena_t *arena;
@ -2335,7 +2342,8 @@ label_return:
} }
static const ctl_named_node_t * static const ctl_named_node_t *
arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
const ctl_named_node_t *ret; const ctl_named_node_t *ret;
malloc_mutex_lock(tsdn, &ctl_mtx); malloc_mutex_lock(tsdn, &ctl_mtx);
@ -2360,8 +2368,8 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
unsigned narenas; unsigned narenas;
@ -2381,8 +2389,9 @@ label_return:
} }
static int static int
arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen, bool dirty) {
int ret; int ret;
if (oldp != NULL && oldlenp != NULL) { if (oldp != NULL && oldlenp != NULL) {
@ -2430,7 +2439,8 @@ CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t i) {
if (i > NBINS) { if (i > NBINS) {
return NULL; return NULL;
} }
@ -2441,8 +2451,8 @@ CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
size_t) size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
size_t i) { size_t miblen, size_t i) {
if (i > NSIZES - NBINS) { if (i > NSIZES - NBINS) {
return NULL; return NULL;
} }
@ -2450,8 +2460,8 @@ arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
} }
static int static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
extent_hooks_t *extent_hooks; extent_hooks_t *extent_hooks;
unsigned arena_ind; unsigned arena_ind;
@ -2473,8 +2483,9 @@ label_return:
} }
static int static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
unsigned arena_ind; unsigned arena_ind;
void *ptr; void *ptr;
@ -2505,8 +2516,9 @@ label_return:
/******************************************************************************/ /******************************************************************************/
static int static int
prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
@ -2532,8 +2544,8 @@ label_return:
} }
static int static int
prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
@ -2558,8 +2570,8 @@ label_return:
} }
static int static int
prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
const char *filename = NULL; const char *filename = NULL;
@ -2581,8 +2593,8 @@ label_return:
} }
static int static int
prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
bool oldval; bool oldval;
@ -2607,8 +2619,8 @@ label_return:
} }
static int static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
size_t *oldlenp, void *newp, size_t newlen) { void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret; int ret;
size_t lg_sample = lg_prof_sample; size_t lg_sample = lg_prof_sample;
@ -2764,8 +2776,9 @@ RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
/* Resets all mutex stats, including global, arena and bin mutexes. */ /* Resets all mutex stats, including global, arena and bin mutexes. */
static int static int
stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) { size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
if (!config_stats) { if (!config_stats) {
return ENOENT; return ENOENT;
} }
@ -2834,8 +2847,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
size_t j) { size_t miblen, size_t j) {
if (j > NBINS) { if (j > NBINS) {
return NULL; return NULL;
} }
@ -2855,8 +2868,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
size_t j) { size_t miblen, size_t j) {
if (j > NSIZES - NBINS) { if (j > NSIZES - NBINS) {
return NULL; return NULL;
} }
@ -2864,7 +2877,8 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
} }
static const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t i) {
const ctl_named_node_t *ret; const ctl_named_node_t *ret;
size_t a; size_t a;

View File

@ -119,9 +119,13 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
/******************************************************************************/ /******************************************************************************/
ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link, #define ATTR_NONE /* does nothing */
ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
extent_esnead_comp) extent_esnead_comp)
#undef ATTR_NONE
typedef enum { typedef enum {
lock_result_success, lock_result_success,
lock_result_failure, lock_result_failure,

View File

@ -970,6 +970,14 @@ malloc_conf_init(void) {
} \ } \
continue; \ continue; \
} }
/*
* One of the CONF_MIN macros below expands, in one of the use points,
* to "unsigned integer < 0", which is always false, triggering the
* GCC -Wtype-limits warning, which we disable here and re-enable below.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
#define CONF_MIN_no(um, min) false #define CONF_MIN_no(um, min) false
#define CONF_MIN_yes(um, min) ((um) < (min)) #define CONF_MIN_yes(um, min) ((um) < (min))
#define CONF_MAX_no(um, max) false #define CONF_MAX_no(um, max) false
@ -1246,6 +1254,8 @@ malloc_conf_init(void) {
#undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P #undef CONF_HANDLE_CHAR_P
/* Re-enable diagnostic "-Wtype-limits" */
JEMALLOC_DIAGNOSTIC_POP
} }
if (opt_abort_conf && had_conf_error) { if (opt_abort_conf && had_conf_error) {
malloc_abort_invalid_conf(); malloc_abort_invalid_conf();
@ -2992,7 +3002,7 @@ label_not_resized:
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure) JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, UNUSED int flags) { je_sallocx(const void *ptr, int flags) {
size_t usize; size_t usize;
tsdn_t *tsdn; tsdn_t *tsdn;

View File

@ -46,7 +46,7 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) { malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data; mutex_prof_data_t *data = &mutex->prof_data;
UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; nstime_t before = NSTIME_ZERO_INITIALIZER;
if (ncpus == 1) { if (ncpus == 1) {
goto label_spin_done; goto label_spin_done;

View File

@ -39,7 +39,7 @@ rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
/* Nodes are never deleted during normal operation. */ /* Nodes are never deleted during normal operation. */
not_reached(); not_reached();
} }
UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
rtree_node_dalloc_impl; rtree_node_dalloc_impl;
static rtree_leaf_elm_t * static rtree_leaf_elm_t *
@ -54,7 +54,7 @@ rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
/* Leaves are never deleted during normal operation. */ /* Leaves are never deleted during normal operation. */
not_reached(); not_reached();
} }
UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
rtree_leaf_dalloc_impl; rtree_leaf_dalloc_impl;
#ifdef JEMALLOC_JET #ifdef JEMALLOC_JET

View File

@ -206,7 +206,7 @@ tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
/* Lock the arena associated with the first object. */ /* Lock the arena associated with the first object. */
extent_t *extent = item_extent[0]; extent_t *extent = item_extent[0];
arena_t *locked_arena = extent_arena_get(extent); arena_t *locked_arena = extent_arena_get(extent);
UNUSED bool idump; bool idump;
if (config_prof) { if (config_prof) {
idump = false; idump = false;

View File

@ -12,6 +12,10 @@
static unsigned ncleanups; static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; __thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; __thread bool JEMALLOC_TLS_MODEL tsd_initialized = false;
@ -41,6 +45,7 @@ tsd_init_head_t tsd_init_head = {
ql_head_initializer(blocks), ql_head_initializer(blocks),
MALLOC_MUTEX_INITIALIZER MALLOC_MUTEX_INITIALIZER
}; };
tsd_wrapper_t tsd_boot_wrapper = { tsd_wrapper_t tsd_boot_wrapper = {
false, false,
TSD_INITIALIZER TSD_INITIALIZER
@ -48,6 +53,7 @@ tsd_wrapper_t tsd_boot_wrapper = {
bool tsd_booted = false; bool tsd_booted = false;
#endif #endif
JEMALLOC_DIAGNOSTIC_POP
/******************************************************************************/ /******************************************************************************/

View File

@ -34,6 +34,17 @@ TEST_BEGIN(test_alignment_errors) {
} }
TEST_END TEST_END
/*
* GCC "-Walloc-size-larger-than" warning detects when one of the memory
* allocation functions is called with a size larger than the maximum size that
* they support. Here we want to explicitly test that the allocation functions
* do indeed fail properly when this is the case, which triggers the warning.
* Therefore we disable the warning for these tests.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
TEST_BEGIN(test_oom_errors) { TEST_BEGIN(test_oom_errors) {
size_t alignment, size; size_t alignment, size;
void *p; void *p;
@ -78,6 +89,9 @@ TEST_BEGIN(test_oom_errors) {
} }
TEST_END TEST_END
/* Re-enable the "-Walloc-size-larger-than=" warning */
JEMALLOC_DIAGNOSTIC_POP
TEST_BEGIN(test_alignment_and_size) { TEST_BEGIN(test_alignment_and_size) {
#define NITER 4 #define NITER 4
size_t alignment, size, total; size_t alignment, size, total;

View File

@ -51,6 +51,16 @@ purge(void) {
"Unexpected mallctl error"); "Unexpected mallctl error");
} }
/*
* GCC "-Walloc-size-larger-than" warning detects when one of the memory
* allocation functions is called with a size larger than the maximum size that
* they support. Here we want to explicitly test that the allocation functions
* do indeed fail properly when this is the case, which triggers the warning.
* Therefore we disable the warning for these tests.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
TEST_BEGIN(test_overflow) { TEST_BEGIN(test_overflow) {
size_t largemax; size_t largemax;
@ -145,6 +155,9 @@ TEST_BEGIN(test_oom) {
} }
TEST_END TEST_END
/* Re-enable the "-Walloc-size-larger-than=" warning */
JEMALLOC_DIAGNOSTIC_POP
TEST_BEGIN(test_basic) { TEST_BEGIN(test_basic) {
#define MAXSZ (((size_t)1) << 23) #define MAXSZ (((size_t)1) << 23)
size_t sz; size_t sz;

View File

@ -1,5 +1,15 @@
#include "test/jemalloc_test.h" #include "test/jemalloc_test.h"
/*
* GCC "-Walloc-size-larger-than" warning detects when one of the memory
* allocation functions is called with a size larger than the maximum size that
* they support. Here we want to explicitly test that the allocation functions
* do indeed fail properly when this is the case, which triggers the warning.
* Therefore we disable the warning for these tests.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
TEST_BEGIN(test_overflow) { TEST_BEGIN(test_overflow) {
unsigned nlextents; unsigned nlextents;
size_t mib[4]; size_t mib[4];
@ -39,6 +49,9 @@ TEST_BEGIN(test_overflow) {
} }
TEST_END TEST_END
/* Re-enable the "-Walloc-size-larger-than=" warning */
JEMALLOC_DIAGNOSTIC_POP
int int
main(void) { main(void) {
return test( return test(

View File

@ -208,6 +208,16 @@ TEST_BEGIN(test_lg_align_and_zero) {
} }
TEST_END TEST_END
/*
* GCC "-Walloc-size-larger-than" warning detects when one of the memory
* allocation functions is called with a size larger than the maximum size that
* they support. Here we want to explicitly test that the allocation functions
* do indeed fail properly when this is the case, which triggers the warning.
* Therefore we disable the warning for these tests.
*/
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_ALLOC_SIZE_LARGER_THAN
TEST_BEGIN(test_overflow) { TEST_BEGIN(test_overflow) {
size_t largemax; size_t largemax;
void *p; void *p;
@ -234,6 +244,9 @@ TEST_BEGIN(test_overflow) {
} }
TEST_END TEST_END
/* Re-enable the "-Walloc-size-larger-than=" warning */
JEMALLOC_DIAGNOSTIC_POP
int int
main(void) { main(void) {
return test( return test(

View File

@ -347,11 +347,11 @@ static void
emit_table_row(emitter_t *emitter) { emit_table_row(emitter_t *emitter) {
emitter_begin(emitter); emitter_begin(emitter);
emitter_row_t row; emitter_row_t row;
emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title}; emitter_col_t abc = {emitter_justify_left, 10, emitter_type_title, {0}, {0, 0}};
abc.str_val = "ABC title"; abc.str_val = "ABC title";
emitter_col_t def = {emitter_justify_right, 15, emitter_type_title}; emitter_col_t def = {emitter_justify_right, 15, emitter_type_title, {0}, {0, 0}};
def.str_val = "DEF title"; def.str_val = "DEF title";
emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title}; emitter_col_t ghi = {emitter_justify_right, 5, emitter_type_title, {0}, {0, 0}};
ghi.str_val = "GHI"; ghi.str_val = "GHI";
emitter_row_init(&row); emitter_row_init(&row);