Enabled -Wstrict-prototypes and fixed warnings.

Qi Wang, 2023-07-05 13:33:34 -07:00 (committed by Qi Wang)
parent ebd7e99f5c
commit 602edd7566
42 changed files with 82 additions and 80 deletions
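
For readers unfamiliar with the warning: in C17 and earlier, an empty parameter list in a declaration such as `bool hook_boot();` declares a function with an unspecified argument list, not one that takes no arguments, so the compiler cannot check calls against a prototype. `-Wstrict-prototypes` flags exactly these declarations and definitions, and the fix applied throughout this commit is mechanical: spell out `(void)`. A minimal sketch of the warning and the fix follows; the file and function names are made up for illustration and are not taken from jemalloc.

/* strict_prototypes_demo.c (hypothetical example, not part of the tree)
 * Compile with, e.g.:  cc -c -Wstrict-prototypes strict_prototypes_demo.c
 */
#include <stddef.h>

/* Old style: the empty () leaves the parameter list unspecified, so the
 * compiler cannot type-check callers; -Wstrict-prototypes reports a
 * warning along the lines of "function declaration isn't a prototype". */
int legacy_count();

/* Fixed: (void) is a real prototype taking no arguments, so a call such
 * as legacy_count_fixed(3) becomes a compile-time error. */
int legacy_count_fixed(void);

/* The same rule applies to function-pointer types, which is why the
 * test_hooks_* pointers and the (void (*)(void)) casts below change. */
static void (*example_hook)(void) = NULL;

(C23 redefines an empty `()` to mean the same as `(void)`, but under the earlier standards this codebase targets the explicit form is required to get prototype checking.)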

@@ -262,6 +262,7 @@ if test "x$GCC" = "xyes" ; then
 dnl This one too.
 JE_CFLAGS_ADD([-Wno-missing-field-initializers])
 JE_CFLAGS_ADD([-Wno-missing-attributes])
+JE_CFLAGS_ADD([-Wstrict-prototypes])
 JE_CFLAGS_ADD([-pipe])
 JE_CFLAGS_ADD([-g3])
 elif test "x$je_cv_msvc" = "xyes" ; then

@@ -53,7 +53,7 @@ bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
 #ifdef PAGES_CAN_PURGE_FORCED
 bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
 #endif
-bool ehooks_default_split_impl();
+bool ehooks_default_split_impl(void);
 /*
  * Merge is the only default extent hook we declare -- see the comment in
  * ehooks_merge.

@@ -144,7 +144,7 @@ struct hook_ralloc_args_s {
  * Returns an opaque handle to be used when removing the hook. NULL means that
  * we couldn't install the hook.
  */
-bool hook_boot();
+bool hook_boot(void);
 void *hook_install(tsdn_t *tsdn, hooks_t *hooks);
 /* Uninstalls the hook with the handle previously returned from hook_install. */

@@ -143,7 +143,7 @@ struct hpa_shard_s {
  * is not necessarily a guarantee that it backs its allocations by hugepages,
  * just that it can function properly given the system it's running on.
  */
-bool hpa_supported();
+bool hpa_supported(void);
 bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
 bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
 base_t *base, edata_cache_t *edata_cache, unsigned ind,

@@ -19,7 +19,7 @@ bool prof_bt_keycomp(const void *k1, const void *k2);
 bool prof_data_init(tsd_t *tsd);
 prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
 int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
-void prof_unbias_map_init();
+void prof_unbias_map_init(void);
 void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
 prof_tdata_t *tdata, bool leakcheck);
 prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,

@@ -51,16 +51,16 @@ extern size_t lg_prof_sample;
 extern bool prof_booted;
 void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
-prof_backtrace_hook_t prof_backtrace_hook_get();
+prof_backtrace_hook_t prof_backtrace_hook_get(void);
 void prof_dump_hook_set(prof_dump_hook_t hook);
-prof_dump_hook_t prof_dump_hook_get();
+prof_dump_hook_t prof_dump_hook_get(void);
 void prof_sample_hook_set(prof_sample_hook_t hook);
-prof_sample_hook_t prof_sample_hook_get();
+prof_sample_hook_t prof_sample_hook_get(void);
 void prof_sample_free_hook_set(prof_sample_free_hook_t hook);
-prof_sample_free_hook_t prof_sample_free_hook_get();
+prof_sample_free_hook_t prof_sample_free_hook_get(void);
 /* Functions only accessed in prof_inlines.h */
 prof_tdata_t *prof_tdata_init(tsd_t *tsd);

@@ -7,7 +7,7 @@
 #include "jemalloc/internal/jemalloc_internal_inlines_c.h"
 JEMALLOC_ALWAYS_INLINE void
-prof_active_assert() {
+prof_active_assert(void) {
 cassert(config_prof);
 /*
 * If opt_prof is off, then prof_active must always be off, regardless

@@ -7,7 +7,7 @@ extern malloc_mutex_t prof_recent_dump_mtx;
 bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
 void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
 void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
-bool prof_recent_init();
+bool prof_recent_init(void);
 void edata_prof_recent_alloc_init(edata_t *edata);
 /* Used in unit tests. */
@@ -16,7 +16,7 @@ extern prof_recent_list_t prof_recent_alloc_list;
 edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node);
 prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
-ssize_t prof_recent_alloc_max_ctl_read();
+ssize_t prof_recent_alloc_max_ctl_read(void);
 ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max);
 void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque);

@@ -6,8 +6,8 @@ extern base_t *prof_base;
 void bt_init(prof_bt_t *bt, void **vec);
 void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
-void prof_hooks_init();
-void prof_unwind_init();
+void prof_hooks_init(void);
+void prof_unwind_init(void);
 void prof_sys_thread_name_fetch(tsd_t *tsd);
 int prof_getpid(void);
 void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
@@ -24,7 +24,7 @@ typedef int (prof_dump_open_file_t)(const char *, int);
 extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
 typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
 extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
-typedef int (prof_dump_open_maps_t)();
+typedef int (prof_dump_open_maps_t)(void);
 extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;
 #endif /* JEMALLOC_INTERNAL_PROF_SYS_H */

@@ -20,7 +20,7 @@ struct san_bump_alloc_s {
 };
 static inline bool
-san_bump_enabled() {
+san_bump_enabled(void) {
 /*
 * We enable san_bump allocator only when it's possible to break up a
 * mapping and unmap a part of it (maps_coalesce). This is needed to

@@ -8,7 +8,7 @@ typedef struct {
 } spin_t;
 static inline void
-spin_cpu_spinwait() {
+spin_cpu_spinwait(void) {
 # if HAVE_CPU_SPINWAIT
 CPU_SPINWAIT;
 # else

@@ -1,8 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_TEST_HOOKS_H
 #define JEMALLOC_INTERNAL_TEST_HOOKS_H
-extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
-extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
+extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)(void);
+extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)(void);
 #if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST)
 # define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

@@ -177,7 +177,7 @@ void tsd_postfork_child(tsd_t *tsd);
 */
 void tsd_global_slow_inc(tsdn_t *tsdn);
 void tsd_global_slow_dec(tsdn_t *tsdn);
-bool tsd_global_slow();
+bool tsd_global_slow(void);
 #define TSD_MIN_INIT_STATE_MAX_FETCHED (128)

@@ -159,7 +159,7 @@ ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
 #endif
 bool
-ehooks_default_split_impl() {
+ehooks_default_split_impl(void) {
 if (!maps_coalesce) {
 /*
 * Without retain, only whole regions can be purged (required by

@@ -19,7 +19,7 @@ static seq_hooks_t hooks[HOOK_MAX];
 static malloc_mutex_t hooks_mu;
 bool
-hook_boot() {
+hook_boot(void) {
 return malloc_mutex_init(&hooks_mu, "hooks", WITNESS_RANK_HOOK,
 malloc_mutex_rank_exclusive);
 }
@@ -100,7 +100,7 @@ for (int for_each_hook_counter = 0; \
 }
 static bool *
-hook_reentrantp() {
+hook_reentrantp(void) {
 /*
 * We prevent user reentrancy within hooks. This is basically just a
 * thread-local bool that triggers an early-exit.

@@ -24,7 +24,7 @@ static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
 static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
 bool
-hpa_supported() {
+hpa_supported(void) {
 #ifdef _WIN32
 /*
 * At least until the API and implementation is somewhat settled, we

@@ -766,7 +766,7 @@ malloc_ncpus(void) {
 * Since otherwise tricky things is possible with percpu arenas in use.
 */
 static bool
-malloc_cpu_count_is_deterministic()
+malloc_cpu_count_is_deterministic(void)
 {
 #ifdef _WIN32
 return true;
@@ -1807,7 +1807,7 @@ malloc_init_hard_needed(void) {
 }
 static bool
-malloc_init_hard_a0_locked() {
+malloc_init_hard_a0_locked(void) {
 malloc_initializer = INITIALIZER;
 JEMALLOC_DIAGNOSTIC_PUSH

@@ -66,7 +66,7 @@ static int madvise_dont_need_zeros_is_faulty = -1;
 *
 * [1]: https://patchwork.kernel.org/patch/10576637/
 */
-static int madvise_MADV_DONTNEED_zeroes_pages()
+static int madvise_MADV_DONTNEED_zeroes_pages(void)
 {
 size_t size = PAGE;

@@ -562,7 +562,7 @@ prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
 }
 prof_backtrace_hook_t
-prof_backtrace_hook_get() {
+prof_backtrace_hook_get(void) {
 return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
 ATOMIC_ACQUIRE);
 }
@@ -573,7 +573,7 @@ prof_dump_hook_set(prof_dump_hook_t hook) {
 }
 prof_dump_hook_t
-prof_dump_hook_get() {
+prof_dump_hook_get(void) {
 return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook,
 ATOMIC_ACQUIRE);
 }
@@ -584,7 +584,7 @@ prof_sample_hook_set(prof_sample_hook_t hook) {
 }
 prof_sample_hook_t
-prof_sample_hook_get() {
+prof_sample_hook_get(void) {
 return (prof_sample_hook_t)atomic_load_p(&prof_sample_hook,
 ATOMIC_ACQUIRE);
 }
@@ -595,7 +595,7 @@ prof_sample_free_hook_set(prof_sample_free_hook_t hook) {
 }
 prof_sample_free_hook_t
-prof_sample_free_hook_get() {
+prof_sample_free_hook_get(void) {
 return (prof_sample_free_hook_t)atomic_load_p(&prof_sample_free_hook,
 ATOMIC_ACQUIRE);
 }

@@ -503,7 +503,7 @@ prof_double_uint64_cast(double d) {
 }
 #endif
-void prof_unbias_map_init() {
+void prof_unbias_map_init(void) {
 /* See the comment in prof_sample_new_event_wait */
 #ifdef JEMALLOC_PROF
 for (szind_t i = 0; i < SC_NSIZES; i++) {

@@ -16,13 +16,13 @@ prof_recent_list_t prof_recent_alloc_list;
 malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
 static void
-prof_recent_alloc_max_init() {
+prof_recent_alloc_max_init(void) {
 atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
 ATOMIC_RELAXED);
 }
 static inline ssize_t
-prof_recent_alloc_max_get_no_lock() {
+prof_recent_alloc_max_get_no_lock(void) {
 return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
 }
@@ -403,7 +403,7 @@ label_rollback:
 }
 ssize_t
-prof_recent_alloc_max_ctl_read() {
+prof_recent_alloc_max_ctl_read(void) {
 cassert(config_prof);
 /* Don't bother to acquire the lock. */
 return prof_recent_alloc_max_get_no_lock();
@@ -582,7 +582,7 @@ prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
 #undef PROF_RECENT_PRINT_BUFSIZE
 bool
-prof_recent_init() {
+prof_recent_init(void) {
 cassert(config_prof);
 prof_recent_alloc_max_init();

@@ -428,7 +428,7 @@ prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
 }
 void
-prof_hooks_init() {
+prof_hooks_init(void) {
 prof_backtrace_hook_set(&prof_backtrace_impl);
 prof_dump_hook_set(NULL);
 prof_sample_hook_set(NULL);
@@ -436,7 +436,7 @@ prof_hooks_init() {
 }
 void
-prof_unwind_init() {
+prof_unwind_init(void) {
 #ifdef JEMALLOC_PROF_LIBGCC
 /*
 * Cause the backtracing machinery to allocate its internal
@@ -596,7 +596,7 @@ prof_open_maps_internal(const char *format, ...) {
 #endif
 static int
-prof_dump_open_maps_impl() {
+prof_dump_open_maps_impl(void) {
 int mfd;
 cassert(config_prof);

@@ -6,7 +6,7 @@
 * from outside the generated library, so that we can use them in test code.
 */
 JEMALLOC_EXPORT
-void (*test_hooks_arena_new_hook)() = NULL;
+void (*test_hooks_arena_new_hook)(void) = NULL;
 JEMALLOC_EXPORT
-void (*test_hooks_libc_hook)() = NULL;
+void (*test_hooks_libc_hook)(void) = NULL;

@@ -148,7 +148,7 @@ tsd_local_slow(tsd_t *tsd) {
 }
 bool
-tsd_global_slow() {
+tsd_global_slow(void) {
 return atomic_load_u32(&tsd_global_slow_count, ATOMIC_RELAXED) > 0;
 }

@@ -29,7 +29,7 @@ do_print(const char *name, size_t sz_bytes) {
 }
 int
-main() {
+main(void) {
 #define P(type) \
 do_print(#type, sizeof(type))
 P(arena_t);

@@ -40,8 +40,8 @@ compare_funcs(uint64_t nwarmup, uint64_t niter, const char *name_a,
 return;
 }
-time_func(&timer_a, nwarmup, niter, (void (*)())func_a);
-time_func(&timer_b, nwarmup, niter, (void (*)())func_b);
+time_func(&timer_a, nwarmup, niter, (void (*)(void))func_a);
+time_func(&timer_b, nwarmup, niter, (void (*)(void))func_b);
 uint64_t usec_a = timer_usec(&timer_a);
 char buf_a[FMT_NSECS_BUF_SIZE];

@@ -569,7 +569,7 @@ label_test_end: \
 } \
 } while (0)
-bool test_is_reentrant();
+bool test_is_reentrant(void);
 void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
 void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);

@@ -35,7 +35,7 @@ reentrancy_t_str(reentrancy_t r) {
 }
 static void
-do_hook(bool *hook_ran, void (**hook)()) {
+do_hook(bool *hook_ran, void (**hook)(void)) {
 *hook_ran = true;
 *hook = NULL;
@@ -47,12 +47,12 @@ do_hook(bool *hook_ran, void (**hook)()) {
 }
 static void
-libc_reentrancy_hook() {
+libc_reentrancy_hook(void) {
 do_hook(&libc_hook_ran, &test_hooks_libc_hook);
 }
 static void
-arena_new_reentrancy_hook() {
+arena_new_reentrancy_hook(void) {
 do_hook(&arena_new_hook_ran, &test_hooks_arena_new_hook);
 }

@@ -124,12 +124,12 @@ compare_with_free(size_t batch, size_t iter,
 }
 static void
-batch_alloc_without_free_tiny() {
+batch_alloc_without_free_tiny(void) {
 batch_alloc_without_free(TINY_BATCH);
 }
 static void
-item_alloc_without_free_tiny() {
+item_alloc_without_free_tiny(void) {
 item_alloc_without_free(TINY_BATCH);
 }
@@ -140,12 +140,12 @@ TEST_BEGIN(test_tiny_batch_without_free) {
 TEST_END
 static void
-batch_alloc_with_free_tiny() {
+batch_alloc_with_free_tiny(void) {
 batch_alloc_with_free(TINY_BATCH);
 }
 static void
-item_alloc_with_free_tiny() {
+item_alloc_with_free_tiny(void) {
 item_alloc_with_free(TINY_BATCH);
 }
@@ -156,12 +156,12 @@ TEST_BEGIN(test_tiny_batch_with_free) {
 TEST_END
 static void
-batch_alloc_without_free_huge() {
+batch_alloc_without_free_huge(void) {
 batch_alloc_without_free(HUGE_BATCH);
 }
 static void
-item_alloc_without_free_huge() {
+item_alloc_without_free_huge(void) {
 item_alloc_without_free(HUGE_BATCH);
 }
@@ -172,12 +172,12 @@ TEST_BEGIN(test_huge_batch_without_free) {
 TEST_END
 static void
-batch_alloc_with_free_huge() {
+batch_alloc_with_free_huge(void) {
 batch_alloc_with_free(HUGE_BATCH);
 }
 static void
-item_alloc_with_free_huge() {
+item_alloc_with_free_huge(void) {
 item_alloc_with_free(HUGE_BATCH);
 }

@@ -16,13 +16,13 @@ test_double_free_pre(void) {
 }
 static void
-test_double_free_post() {
+test_double_free_post(void) {
 expect_b_eq(fake_abort_called, true, "Double-free check didn't fire.");
 safety_check_set_abort(NULL);
 }
 static bool
-tcache_enabled() {
+tcache_enabled(void) {
 bool enabled;
 size_t sz = sizeof(enabled);
 assert_d_eq(

@@ -95,7 +95,7 @@ do_fork_thd(void *arg) {
 #ifndef _WIN32
 static void
-do_test_fork_multithreaded() {
+do_test_fork_multithreaded(void) {
 thd_t child;
 thd_create(&child, do_fork_thd, NULL);
 do_fork_thd(NULL);

@@ -14,7 +14,7 @@ static uintptr_t arg_args_raw[4];
 static int call_count = 0;
 static void
-reset_args() {
+reset_args(void) {
 arg_extra = NULL;
 arg_type = 12345;
 arg_result = NULL;
@@ -40,7 +40,7 @@ alloc_free_size(size_t sz) {
 * allocation scenarios.
 */
 static void
-be_reentrant() {
+be_reentrant(void) {
 /* Let's make sure the tcache is non-empty if enabled. */
 alloc_free_size(1);
 alloc_free_size(1024);
@@ -77,7 +77,7 @@ expect_args_raw(uintptr_t *args_raw_expected, int nargs) {
 }
 static void
-reset() {
+reset(void) {
 call_count = 0;
 reset_args();
 }

@@ -2,7 +2,7 @@
 #include "test/sleep.h"
 static void
-sleep_for_background_thread_interval() {
+sleep_for_background_thread_interval(void) {
 /*
 * The sleep interval set in our .sh file is 50ms. So it likely will
 * run if we sleep for four times that.
@@ -11,7 +11,7 @@ sleep_for_background_thread_interval() {
 }
 static unsigned
-create_arena() {
+create_arena(void) {
 unsigned arena_ind;
 size_t sz;

@@ -7,7 +7,7 @@ static void *last_junked_ptr;
 static size_t last_junked_usize;
 static void
-reset() {
+reset(void) {
 ptr_ind = 0;
 last_junked_ptr = NULL;
 last_junked_usize = 0;

@@ -4,11 +4,11 @@
 #define N_PARAM 100
 #define N_THREADS 10
-static void expect_rep() {
+static void expect_rep(void) {
 expect_b_eq(prof_log_rep_check(), false, "Rep check failed");
 }
-static void expect_log_empty() {
+static void expect_log_empty(void) {
 expect_zu_eq(prof_log_bt_count(), 0,
 "The log has backtraces; it isn't empty");
 expect_zu_eq(prof_log_thr_count(), 0,
@@ -19,7 +19,7 @@ static void expect_log_empty() {
 void *buf[N_PARAM];
-static void f() {
+static void f(void) {
 int i;
 for (i = 0; i < N_PARAM; i++) {
 buf[i] = malloc(100);
@@ -91,18 +91,18 @@ TEST_BEGIN(test_prof_log_many_threads) {
 }
 TEST_END
-static void f3() {
+static void f3(void) {
 void *p = malloc(100);
 free(p);
 }
-static void f1() {
+static void f1(void) {
 void *p = malloc(100);
 f3();
 free(p);
 }
-static void f2() {
+static void f2(void) {
 void *p = malloc(100);
 free(p);
 }

@@ -129,7 +129,7 @@ TEST_BEGIN(test_mdump_output_error) {
 TEST_END
 static int
-prof_dump_open_maps_error() {
+prof_dump_open_maps_error(void) {
 return -1;
 }

@@ -9,7 +9,7 @@ const char *test_thread_name = "test_thread";
 /* Invariant before and after every test (when config_prof is on) */
 static void
-confirm_prof_setup() {
+confirm_prof_setup(void) {
 /* Options */
 assert_true(opt_prof, "opt_prof not on");
 assert_true(opt_prof_active, "opt_prof_active not on");
@@ -356,7 +356,7 @@ test_dump_write_cb(void *not_used, const char *str) {
 }
 static void
-call_dump() {
+call_dump(void) {
 static void *in[2] = {test_dump_write_cb, NULL};
 dump_out_len = 0;
 assert_d_eq(mallctl("experimental.prof_recent.alloc_dump",

@@ -3,7 +3,7 @@
 static bool hook_called = false;
 static void
-hook() {
+hook(void) {
 hook_called = true;
 }

@@ -4,7 +4,8 @@
 static bool abort_called = false;
-void set_abort_called() {
+void set_abort_called(const char *message) {
+(void)message;
 abort_called = true;
 };

@@ -1,7 +1,7 @@
 #include "test/jemalloc_test.h"
 static uint64_t
-allocated() {
+allocated(void) {
 if (!config_stats) {
 return 0;
 }
@@ -13,7 +13,7 @@ allocated() {
 }
 static uint64_t
-deallocated() {
+deallocated(void) {
 if (!config_stats) {
 return 0;
 }

@@ -1,7 +1,7 @@
 #include "test/jemalloc_test.h"
 static uint64_t
-deallocated() {
+deallocated(void) {
 if (!config_stats) {
 return 0;
 }

@@ -1,7 +1,7 @@
 #include "test/jemalloc_test.h"
 static size_t
-zero_reallocs() {
+zero_reallocs(void) {
 if (!config_stats) {
 return 0;
 }