Remove extraneous parens around return arguments.

This resolves #540.
Jason Evans, 2017-01-19 18:15:45 -08:00
commit f408643a4c (parent: c4c2592c83)
104 changed files with 1161 additions and 1168 deletions
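The change is mechanical: wherever a return statement wraps its entire argument in a redundant pair of parentheses, that outer pair is dropped; parentheses that belong to the expression itself (casts, comparisons, grouping) are kept. A minimal sketch of the pattern, using a hypothetical accessor rather than code from this commit:

	typedef struct { int count; } widget_t;

	static int
	widget_count_get(const widget_t *widget) {
		/* Old style wrote: return (widget->count); -- the outer parens add nothing. */
		return widget->count;
	}

For compound expressions only the outermost pair goes away, e.g. return ((a_sn > b_sn) - (a_sn < b_sn)); becomes return (a_sn > b_sn) - (a_sn < b_sn); as seen in the extent comparators below.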


@ -295,7 +295,7 @@ if test "x$enable_cxx" = "x1" ; then
], [[
int *arr = (int *)malloc(sizeof(int) * 42);
if (arr == NULL)
return (1);
return 1;
]], [je_cv_libstdcxx])
if test "x${je_cv_libstdcxx}" = "xno" ; then
LIBS="${SAVED_LIBS}"
@ -1659,7 +1659,7 @@ JE_COMPILABLE([C11 atomics], [
uint64_t x = 1;
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
uint64_t r = atomic_fetch_add(a, x) + x;
return (r == 0);
return r == 0;
], [je_cv_c11atomics])
if test "x${je_cv_c11atomics}" = "xyes" ; then
AC_DEFINE([JEMALLOC_C11ATOMICS])


@ -15,7 +15,7 @@ bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
JEMALLOC_INLINE unsigned
arena_ind_get(const arena_t *arena) {
return (base_ind_get(arena->base));
return base_ind_get(arena->base);
}
JEMALLOC_INLINE void
@ -30,7 +30,7 @@ arena_internal_sub(arena_t *arena, size_t size) {
JEMALLOC_INLINE size_t
arena_internal_get(arena_t *arena) {
return (atomic_read_zu(&arena->stats.internal));
return atomic_read_zu(&arena->stats.internal);
}
JEMALLOC_INLINE bool
@ -41,9 +41,9 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) {
arena->prof_accumbytes += accumbytes;
if (arena->prof_accumbytes >= prof_interval) {
arena->prof_accumbytes %= prof_interval;
return (true);
return true;
}
return (false);
return false;
}
JEMALLOC_INLINE bool
@ -51,9 +51,9 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
if (likely(prof_interval == 0)) {
return (false);
return false;
}
return (arena_prof_accum_impl(arena, accumbytes));
return arena_prof_accum_impl(arena, accumbytes);
}
JEMALLOC_INLINE bool
@ -61,7 +61,7 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
cassert(config_prof);
if (likely(prof_interval == 0)) {
return (false);
return false;
}
{
@ -70,7 +70,7 @@ arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
malloc_mutex_lock(tsdn, &arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
return ret;
}
}


@ -26,7 +26,7 @@ JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin) {
szind_t binind = (szind_t)(bin - arena->bins);
assert(binind < NBINS);
return (binind);
return binind;
}
JEMALLOC_INLINE prof_tctx_t *
@ -35,9 +35,9 @@ arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
assert(ptr != NULL);
if (unlikely(!extent_slab_get(extent))) {
return (large_prof_tctx_get(tsdn, extent));
return large_prof_tctx_get(tsdn, extent);
}
return ((prof_tctx_t *)(uintptr_t)1U);
return (prof_tctx_t *)(uintptr_t)1U;
}
JEMALLOC_INLINE void
@ -94,23 +94,23 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path));
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
if (likely(size <= tcache_maxclass)) {
return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path));
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
/* (size > tcache_maxclass) case falls through. */
assert(size > tcache_maxclass);
}
return (arena_malloc_hard(tsdn, arena, size, ind, zero));
return arena_malloc_hard(tsdn, arena, size, ind, zero);
}
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
return (extent_arena_get(iealloc(tsdn, ptr)));
return extent_arena_get(iealloc(tsdn, ptr));
}
/* Return the size of the allocation pointed to by ptr. */
@ -126,7 +126,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
ret = large_salloc(tsdn, extent);
}
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void


@ -9,15 +9,15 @@
* operations can be optimized away if the return values aren't used by the
* callers.
*
* <t> atomic_read_<t>(<t> *p) { return (*p); }
* <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
* <t> atomic_read_<t>(<t> *p) { return *p; }
* <t> atomic_add_<t>(<t> *p, <t> x) { return *p += x; }
* <t> atomic_sub_<t>(<t> *p, <t> x) { return *p -= x; }
* bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
* {
* if (*p != c)
* return (true);
* return true;
* *p = s;
* return (false);
* return false;
* }
* void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
*/
@ -62,7 +62,7 @@ atomic_add_u64(uint64_t *p, uint64_t x) {
: "m" (*p) /* Inputs. */
);
return (t + x);
return t + x;
}
JEMALLOC_INLINE uint64_t
@ -77,7 +77,7 @@ atomic_sub_u64(uint64_t *p, uint64_t x) {
: "m" (*p) /* Inputs. */
);
return (t + x);
return t + x;
}
JEMALLOC_INLINE bool
@ -92,7 +92,7 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
: "memory" /* Clobbers. */
);
return (!(bool)success);
return !(bool)success;
}
JEMALLOC_INLINE void
@ -108,19 +108,19 @@ atomic_write_u64(uint64_t *p, uint64_t x) {
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_add(a, x) + x);
return atomic_fetch_add(a, x) + x;
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (atomic_fetch_sub(a, x) - x);
return atomic_fetch_sub(a, x) - x;
}
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
return !atomic_compare_exchange_strong(a, &c, s);
}
JEMALLOC_INLINE void
@ -137,21 +137,21 @@ atomic_add_u64(uint64_t *p, uint64_t x) {
*/
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)x) + x);
return atomic_fetchadd_long(p, (unsigned long)x) + x;
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x) {
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
return atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x;
}
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
return !atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s);
}
JEMALLOC_INLINE void
@ -163,17 +163,17 @@ atomic_write_u64(uint64_t *p, uint64_t x) {
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x) {
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
return OSAtomicAdd64((int64_t)x, (int64_t *)p);
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x) {
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
return OSAtomicAdd64(-((int64_t)x), (int64_t *)p);
}
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
return !OSAtomicCompareAndSwap64(c, s, (int64_t *)p);
}
JEMALLOC_INLINE void
@ -188,12 +188,12 @@ atomic_write_u64(uint64_t *p, uint64_t x) {
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x) {
return (InterlockedExchangeAdd64(p, x) + x);
return InterlockedExchangeAdd64(p, x) + x;
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x) {
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
return InterlockedExchangeAdd64(p, -((int64_t)x)) - x;
}
JEMALLOC_INLINE bool
@ -201,7 +201,7 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
uint64_t o;
o = InterlockedCompareExchange64(p, s, c);
return (o != c);
return o != c;
}
JEMALLOC_INLINE void
@ -212,17 +212,17 @@ atomic_write_u64(uint64_t *p, uint64_t x) {
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x) {
return (__sync_add_and_fetch(p, x));
return __sync_add_and_fetch(p, x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x) {
return (__sync_sub_and_fetch(p, x));
return __sync_sub_and_fetch(p, x);
}
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
return (!__sync_bool_compare_and_swap(p, c, s));
return !__sync_bool_compare_and_swap(p, c, s);
}
JEMALLOC_INLINE void
@ -247,7 +247,7 @@ atomic_add_u32(uint32_t *p, uint32_t x) {
: "m" (*p) /* Inputs. */
);
return (t + x);
return t + x;
}
JEMALLOC_INLINE uint32_t
@ -262,7 +262,7 @@ atomic_sub_u32(uint32_t *p, uint32_t x) {
: "m" (*p) /* Inputs. */
);
return (t + x);
return t + x;
}
JEMALLOC_INLINE bool
@ -277,7 +277,7 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
: "memory"
);
return (!(bool)success);
return !(bool)success;
}
JEMALLOC_INLINE void
@ -293,19 +293,19 @@ atomic_write_u32(uint32_t *p, uint32_t x) {
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_add(a, x) + x);
return atomic_fetch_add(a, x) + x;
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (atomic_fetch_sub(a, x) - x);
return atomic_fetch_sub(a, x) - x;
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
return (!atomic_compare_exchange_strong(a, &c, s));
return !atomic_compare_exchange_strong(a, &c, s);
}
JEMALLOC_INLINE void
@ -316,17 +316,17 @@ atomic_write_u32(uint32_t *p, uint32_t x) {
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x) {
return (atomic_fetchadd_32(p, x) + x);
return atomic_fetchadd_32(p, x) + x;
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x) {
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
return atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x;
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
return (!atomic_cmpset_32(p, c, s));
return !atomic_cmpset_32(p, c, s);
}
JEMALLOC_INLINE void
@ -336,17 +336,17 @@ atomic_write_u32(uint32_t *p, uint32_t x) {
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x) {
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
return OSAtomicAdd32((int32_t)x, (int32_t *)p);
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x) {
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
return OSAtomicAdd32(-((int32_t)x), (int32_t *)p);
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
return !OSAtomicCompareAndSwap32(c, s, (int32_t *)p);
}
JEMALLOC_INLINE void
@ -361,12 +361,12 @@ atomic_write_u32(uint32_t *p, uint32_t x) {
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x) {
return (InterlockedExchangeAdd(p, x) + x);
return InterlockedExchangeAdd(p, x) + x;
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x) {
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
return InterlockedExchangeAdd(p, -((int32_t)x)) - x;
}
JEMALLOC_INLINE bool
@ -374,7 +374,7 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
uint32_t o;
o = InterlockedCompareExchange(p, s, c);
return (o != c);
return o != c;
}
JEMALLOC_INLINE void
@ -385,17 +385,17 @@ atomic_write_u32(uint32_t *p, uint32_t x) {
defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x) {
return (__sync_add_and_fetch(p, x));
return __sync_add_and_fetch(p, x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x) {
return (__sync_sub_and_fetch(p, x));
return __sync_sub_and_fetch(p, x);
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
return (!__sync_bool_compare_and_swap(p, c, s));
return !__sync_bool_compare_and_swap(p, c, s);
}
JEMALLOC_INLINE void
@ -411,27 +411,27 @@ atomic_write_u32(uint32_t *p, uint32_t x) {
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x) {
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)x));
return (void *)atomic_add_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_u32((uint32_t *)p, (uint32_t)x));
return (void *)atomic_add_u32((uint32_t *)p, (uint32_t)x);
#endif
}
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x) {
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
return (void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((void *)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x)));
return (void *)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s) {
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
return atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s);
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
return atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s);
#endif
}
@ -449,27 +449,27 @@ atomic_write_p(void **p, const void *x) {
JEMALLOC_INLINE size_t
atomic_add_zu(size_t *p, size_t x) {
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x));
return (size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_u32((uint32_t *)p, (uint32_t)x));
return (size_t)atomic_add_u32((uint32_t *)p, (uint32_t)x);
#endif
}
JEMALLOC_INLINE size_t
atomic_sub_zu(size_t *p, size_t x) {
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
return (size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_PTR == 2)
return ((size_t)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x)));
return (size_t)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_zu(size_t *p, size_t c, size_t s) {
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
return atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s);
#elif (LG_SIZEOF_PTR == 2)
return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
return atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s);
#endif
}
@ -487,29 +487,27 @@ atomic_write_zu(size_t *p, size_t x) {
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x) {
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x));
return (unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_u32((uint32_t *)p, (uint32_t)x));
return (unsigned)atomic_add_u32((uint32_t *)p, (uint32_t)x);
#endif
}
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x) {
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_u64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
return (unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x));
#elif (LG_SIZEOF_INT == 2)
return ((unsigned)atomic_add_u32((uint32_t *)p,
(uint32_t)-((int32_t)x)));
return (unsigned)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x));
#endif
}
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s) {
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
return atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s);
#elif (LG_SIZEOF_INT == 2)
return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
return atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s);
#endif
}


@ -8,7 +8,7 @@ unsigned base_ind_get(const base_t *base);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BASE_C_))
JEMALLOC_INLINE unsigned
base_ind_get(const base_t *base) {
return (base->ind);
return base->ind;
}
#endif


@ -22,10 +22,10 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
for (i = 0; i < binfo->ngroups; i++) {
if (bitmap[i] != 0) {
return (false);
return false;
}
}
return (true);
return true;
#endif
}
@ -37,7 +37,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
assert(bit < binfo->nbits);
goff = bit >> LG_BITMAP_GROUP_NBITS;
g = bitmap[goff];
return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))));
return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
}
JEMALLOC_INLINE void
@ -103,7 +103,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
return bit;
}
JEMALLOC_INLINE void


@ -47,65 +47,65 @@ extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return (rtree_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
dependent));
return rtree_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
dependent);
}
JEMALLOC_INLINE arena_t *
extent_arena_get(const extent_t *extent) {
return (extent->e_arena);
return extent->e_arena;
}
JEMALLOC_INLINE void *
extent_base_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (PAGE_ADDR2BASE(extent->e_addr));
return PAGE_ADDR2BASE(extent->e_addr);
}
JEMALLOC_INLINE void *
extent_addr_get(const extent_t *extent) {
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (extent->e_addr);
return extent->e_addr;
}
JEMALLOC_INLINE size_t
extent_size_get(const extent_t *extent) {
return (extent->e_size);
return extent->e_size;
}
JEMALLOC_INLINE size_t
extent_usize_get(const extent_t *extent) {
assert(!extent->e_slab);
return (extent->e_usize);
return extent->e_usize;
}
JEMALLOC_INLINE void *
extent_before_get(const extent_t *extent) {
return ((void *)((uintptr_t)extent_base_get(extent) - PAGE));
return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
}
JEMALLOC_INLINE void *
extent_last_get(const extent_t *extent) {
return ((void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE));
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE);
}
JEMALLOC_INLINE void *
extent_past_get(const extent_t *extent) {
return ((void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent)));
return (void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent));
}
JEMALLOC_INLINE size_t
extent_sn_get(const extent_t *extent) {
return (extent->e_sn);
return extent->e_sn;
}
JEMALLOC_INLINE bool
extent_active_get(const extent_t *extent) {
return (extent->e_active);
return extent->e_active;
}
JEMALLOC_INLINE bool
@ -115,35 +115,35 @@ extent_retained_get(const extent_t *extent) {
JEMALLOC_INLINE bool
extent_zeroed_get(const extent_t *extent) {
return (extent->e_zeroed);
return extent->e_zeroed;
}
JEMALLOC_INLINE bool
extent_committed_get(const extent_t *extent) {
return (extent->e_committed);
return extent->e_committed;
}
JEMALLOC_INLINE bool
extent_slab_get(const extent_t *extent) {
return (extent->e_slab);
return extent->e_slab;
}
JEMALLOC_INLINE arena_slab_data_t *
extent_slab_data_get(extent_t *extent) {
assert(extent->e_slab);
return (&extent->e_slab_data);
return &extent->e_slab_data;
}
JEMALLOC_INLINE const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent) {
assert(extent->e_slab);
return (&extent->e_slab_data);
return &extent->e_slab_data;
}
JEMALLOC_INLINE prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent) {
return ((prof_tctx_t *)atomic_read_p(
&((extent_t *)extent)->e_prof_tctx_pun));
return (prof_tctx_t *)atomic_read_p(
&((extent_t *)extent)->e_prof_tctx_pun);
}
JEMALLOC_INLINE void
@ -251,7 +251,7 @@ extent_sn_comp(const extent_t *a, const extent_t *b) {
size_t a_sn = extent_sn_get(a);
size_t b_sn = extent_sn_get(b);
return ((a_sn > b_sn) - (a_sn < b_sn));
return (a_sn > b_sn) - (a_sn < b_sn);
}
JEMALLOC_INLINE int
@ -259,7 +259,7 @@ extent_ad_comp(const extent_t *a, const extent_t *b) {
uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
return ((a_addr > b_addr) - (a_addr < b_addr));
return (a_addr > b_addr) - (a_addr < b_addr);
}
JEMALLOC_INLINE int
@ -268,11 +268,11 @@ extent_snad_comp(const extent_t *a, const extent_t *b) {
ret = extent_sn_comp(a, b);
if (ret != 0) {
return (ret);
return ret;
}
ret = extent_ad_comp(a, b);
return (ret);
return ret;
}
#endif


@ -37,10 +37,10 @@ hash_get_block_32(const uint32_t *p, int i) {
uint32_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
return ret;
}
return (p[i]);
return p[i];
}
JEMALLOC_INLINE uint64_t
@ -50,10 +50,10 @@ hash_get_block_64(const uint64_t *p, int i) {
uint64_t ret;
memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
return ret;
}
return (p[i]);
return p[i];
}
JEMALLOC_INLINE uint32_t
@ -64,7 +64,7 @@ hash_fmix_32(uint32_t h) {
h *= 0xc2b2ae35;
h ^= h >> 16;
return (h);
return h;
}
JEMALLOC_INLINE uint64_t
@ -75,7 +75,7 @@ hash_fmix_64(uint64_t k) {
k *= KQU(0xc4ceb9fe1a85ec53);
k ^= k >> 33;
return (k);
return k;
}
JEMALLOC_INLINE uint32_t
@ -125,7 +125,7 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
h1 = hash_fmix_32(h1);
return (h1);
return h1;
}
UNUSED JEMALLOC_INLINE void


@ -552,7 +552,7 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
JEMALLOC_ALWAYS_INLINE pszind_t
psz2ind(size_t psz) {
if (unlikely(psz > LARGE_MAXCLASS)) {
return (NPSIZES);
return NPSIZES;
}
{
pszind_t x = lg_floor((psz<<1)-1);
@ -568,14 +568,14 @@ psz2ind(size_t psz) {
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
pszind_t ind = grp + mod;
return (ind);
return ind;
}
}
JEMALLOC_INLINE size_t
pind2sz_compute(pszind_t pind) {
if (unlikely(pind == NPSIZES)) {
return (LARGE_MAXCLASS + PAGE);
return LARGE_MAXCLASS + PAGE;
}
{
size_t grp = pind >> LG_SIZE_CLASS_GROUP;
@ -590,7 +590,7 @@ pind2sz_compute(pszind_t pind) {
size_t mod_size = (mod+1) << lg_delta;
size_t sz = grp_size + mod_size;
return (sz);
return sz;
}
}
@ -598,19 +598,19 @@ JEMALLOC_INLINE size_t
pind2sz_lookup(pszind_t pind) {
size_t ret = (size_t)pind2sz_tab[pind];
assert(ret == pind2sz_compute(pind));
return (ret);
return ret;
}
JEMALLOC_INLINE size_t
pind2sz(pszind_t pind) {
assert(pind < NPSIZES+1);
return (pind2sz_lookup(pind));
return pind2sz_lookup(pind);
}
JEMALLOC_INLINE size_t
psz2u(size_t psz) {
if (unlikely(psz > LARGE_MAXCLASS)) {
return (LARGE_MAXCLASS + PAGE);
return LARGE_MAXCLASS + PAGE;
}
{
size_t x = lg_floor((psz<<1)-1);
@ -619,14 +619,14 @@ psz2u(size_t psz) {
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (psz + delta_mask) & ~delta_mask;
return (usize);
return usize;
}
}
JEMALLOC_INLINE szind_t
size2index_compute(size_t size) {
if (unlikely(size > LARGE_MAXCLASS)) {
return (NSIZES);
return NSIZES;
}
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
@ -649,7 +649,7 @@ size2index_compute(size_t size) {
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
szind_t index = NTBINS + grp + mod;
return (index);
return index;
}
}
@ -659,7 +659,7 @@ size2index_lookup(size_t size) {
{
szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
assert(ret == size2index_compute(size));
return (ret);
return ret;
}
}
@ -667,9 +667,9 @@ JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size) {
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS)) {
return (size2index_lookup(size));
return size2index_lookup(size);
}
return (size2index_compute(size));
return size2index_compute(size);
}
JEMALLOC_INLINE size_t
@ -694,7 +694,7 @@ index2size_compute(szind_t index) {
size_t mod_size = (mod+1) << lg_delta;
size_t usize = grp_size + mod_size;
return (usize);
return usize;
}
}
@ -702,19 +702,19 @@ JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index) {
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index) {
assert(index < NSIZES);
return (index2size_lookup(index));
return index2size_lookup(index);
}
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size) {
if (unlikely(size > LARGE_MAXCLASS)) {
return (0);
return 0;
}
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
@ -731,7 +731,7 @@ s2u_compute(size_t size) {
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
return usize;
}
}
@ -740,7 +740,7 @@ s2u_lookup(size_t size) {
size_t ret = index2size_lookup(size2index_lookup(size));
assert(ret == s2u_compute(size));
return (ret);
return ret;
}
/*
@ -751,9 +751,9 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size) {
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS)) {
return (s2u_lookup(size));
return s2u_lookup(size);
}
return (s2u_compute(size));
return s2u_compute(size);
}
/*
@ -784,14 +784,14 @@ sa2u(size_t size, size_t alignment) {
*/
usize = s2u(ALIGNMENT_CEILING(size, alignment));
if (usize < LARGE_MINCLASS) {
return (usize);
return usize;
}
}
/* Large size class. Beware of overflow. */
if (unlikely(alignment > LARGE_MAXCLASS)) {
return (0);
return 0;
}
/* Make sure result is a large size class. */
@ -801,7 +801,7 @@ sa2u(size_t size, size_t alignment) {
usize = s2u(size);
if (usize < size) {
/* size_t overflow. */
return (0);
return 0;
}
}
@ -811,9 +811,9 @@ sa2u(size_t size, size_t alignment) {
*/
if (usize + large_pad + PAGE_CEILING(alignment) - PAGE < usize) {
/* size_t overflow. */
return (0);
return 0;
}
return (usize);
return usize;
}
/* Choose an arena based on a per-thread value. */
@ -822,7 +822,7 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
arena_t *ret;
if (arena != NULL) {
return (arena);
return arena;
}
ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
@ -830,17 +830,17 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
ret = arena_choose_hard(tsd, internal);
}
return (ret);
return ret;
}
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena) {
return (arena_choose_impl(tsd, arena, false));
return arena_choose_impl(tsd, arena, false);
}
JEMALLOC_INLINE arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena) {
return (arena_choose_impl(tsd, arena, true));
return arena_choose_impl(tsd, arena, true);
}
JEMALLOC_INLINE arena_tdata_t *
@ -850,7 +850,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
if (unlikely(arenas_tdata == NULL)) {
/* arenas_tdata hasn't been initialized yet. */
return (arena_tdata_get_hard(tsd, ind));
return arena_tdata_get_hard(tsd, ind);
}
if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
/*
@ -863,9 +863,9 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
tdata = &arenas_tdata[ind];
if (likely(tdata != NULL) || !refresh_if_missing) {
return (tdata);
return tdata;
}
return (arena_tdata_get_hard(tsd, ind));
return arena_tdata_get_hard(tsd, ind);
}
JEMALLOC_INLINE arena_t *
@ -882,7 +882,7 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
(extent_hooks_t *)&extent_hooks_default);
}
}
return (ret);
return ret;
}
JEMALLOC_INLINE ticker_t *
@ -891,9 +891,9 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
tdata = arena_tdata_get(tsd, ind, true);
if (unlikely(tdata == NULL)) {
return (NULL);
return NULL;
}
return (&tdata->decay_ticker);
return &tdata->decay_ticker;
}
#endif
@ -911,7 +911,7 @@ extent_t *iealloc(tsdn_t *tsdn, const void *ptr);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr) {
return (extent_lookup(tsdn, ptr, true));
return extent_lookup(tsdn, ptr, true);
}
#endif
@ -953,7 +953,7 @@ JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
return (arena_aalloc(tsdn, ptr));
return arena_aalloc(tsdn, ptr);
}
/*
@ -967,7 +967,7 @@ JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
assert(ptr != NULL);
return (arena_salloc(tsdn, extent, ptr));
return arena_salloc(tsdn, extent, ptr);
}
JEMALLOC_ALWAYS_INLINE void *
@ -985,13 +985,13 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
iealloc(tsdn, ret), ret));
}
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path));
return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
@ -1011,19 +1011,19 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn,
iealloc(tsdn, ret), ret));
}
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena) {
return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd, true), false, NULL));
return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd, true), false, NULL);
}
JEMALLOC_ALWAYS_INLINE size_t
@ -1040,13 +1040,13 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
* */
extent = extent_lookup(tsdn, ptr, false);
if (extent == NULL) {
return (0);
return 0;
}
assert(extent_active_get(extent));
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
return (isalloc(tsdn, extent, ptr));
return isalloc(tsdn, extent, ptr);
}
JEMALLOC_ALWAYS_INLINE void
@ -1085,21 +1085,21 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
usize = sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL);
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0) {
return (NULL);
return NULL;
}
/* Try again, without extra this time. */
usize = sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL);
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
if (p == NULL) {
return (NULL);
return NULL;
}
}
/*
@ -1109,7 +1109,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
return (p);
return p;
}
JEMALLOC_ALWAYS_INLINE void *
@ -1124,19 +1124,19 @@ iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
return (iralloct_realign(tsdn, extent, ptr, oldsize, size, 0,
alignment, zero, tcache, arena));
return iralloct_realign(tsdn, extent, ptr, oldsize, size, 0,
alignment, zero, tcache, arena);
}
return (arena_ralloc(tsdn, arena, extent, ptr, oldsize, size, alignment,
zero, tcache));
return arena_ralloc(tsdn, arena, extent, ptr, oldsize, size, alignment,
zero, tcache);
}
JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero) {
return (iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
zero, tcache_get(tsd, true), NULL));
return iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
zero, tcache_get(tsd, true), NULL);
}
JEMALLOC_ALWAYS_INLINE bool
@ -1148,11 +1148,11 @@ ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
/* Existing object alignment is inadequate. */
return (true);
return true;
}
return (arena_ralloc_no_move(tsdn, extent, ptr, oldsize, size, extra,
zero));
return arena_ralloc_no_move(tsdn, extent, ptr, oldsize, size, extra,
zero);
}
#endif


@ -218,10 +218,10 @@ a_prefix##empty(a_ph_type *ph) { \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) { \
if (ph->ph_root == NULL) { \
return (NULL); \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
return (ph->ph_root); \
return ph->ph_root; \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
@ -255,7 +255,7 @@ a_prefix##remove_first(a_ph_type *ph) { \
a_type *ret; \
\
if (ph->ph_root == NULL) { \
return (NULL); \
return NULL; \
} \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
\
@ -264,7 +264,7 @@ a_prefix##remove_first(a_ph_type *ph) { \
ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
ph->ph_root); \
\
return (ret); \
return ret; \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \


@ -19,20 +19,20 @@ size_t prng_range_zu(size_t *state, size_t range, bool atomic);
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state) {
return ((state * PRNG_A_32) + PRNG_C_32);
return (state * PRNG_A_32) + PRNG_C_32;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state) {
return ((state * PRNG_A_64) + PRNG_C_64);
return (state * PRNG_A_64) + PRNG_C_64;
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
return ((state * PRNG_A_32) + PRNG_C_32);
return (state * PRNG_A_32) + PRNG_C_32;
#elif LG_SIZEOF_PTR == 3
return ((state * PRNG_A_64) + PRNG_C_64);
return (state * PRNG_A_64) + PRNG_C_64;
#else
#error Unsupported pointer size
#endif
@ -58,7 +58,7 @@ prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) {
}
ret = state1 >> (32 - lg_range);
return (ret);
return ret;
}
/* 64-bit atomic operations cannot be supported on all relevant platforms. */
@ -73,7 +73,7 @@ prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
*state = state1;
ret = state1 >> (64 - lg_range);
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
@ -96,7 +96,7 @@ prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) {
}
ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint32_t
@ -114,7 +114,7 @@ prng_range_u32(uint32_t *state, uint32_t range, bool atomic) {
ret = prng_lg_range_u32(state, lg_range, atomic);
} while (ret >= range);
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
@ -132,7 +132,7 @@ prng_range_u64(uint64_t *state, uint64_t range) {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
@ -150,7 +150,7 @@ prng_range_zu(size_t *state, size_t range, bool atomic) {
ret = prng_lg_range_zu(state, lg_range, atomic);
} while (ret >= range);
return (ret);
return ret;
}
#endif


@ -34,7 +34,7 @@ prof_active_get_unlocked(void) {
* prof_active in the fast path, so there are no guarantees regarding
* how long it will take for all threads to notice state changes.
*/
return (prof_active);
return prof_active;
}
JEMALLOC_ALWAYS_INLINE bool
@ -44,7 +44,7 @@ prof_gdump_get_unlocked(void) {
* there are no guarantees regarding how long it will take for all
* threads to notice state changes.
*/
return (prof_gdump_val);
return prof_gdump_val;
}
JEMALLOC_ALWAYS_INLINE prof_tdata_t *
@ -67,7 +67,7 @@ prof_tdata_get(tsd_t *tsd, bool create) {
assert(tdata == NULL || tdata->attached);
}
return (tdata);
return tdata;
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
@ -75,7 +75,7 @@ prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
return (arena_prof_tctx_get(tsdn, extent, ptr));
return arena_prof_tctx_get(tsdn, extent, ptr);
}
JEMALLOC_ALWAYS_INLINE void
@ -113,20 +113,20 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
}
if (unlikely(tdata == NULL)) {
return (true);
return true;
}
if (likely(tdata->bytes_until_sample >= usize)) {
if (update) {
tdata->bytes_until_sample -= usize;
}
return (true);
return true;
} else {
/* Compute new sample threshold. */
if (update) {
prof_sample_threshold_update(tdata);
}
return (!tdata->active);
return !tdata->active;
}
}
@ -147,7 +147,7 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
ret = prof_lookup(tsd, &bt);
}
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void


@ -348,13 +348,13 @@ a_attr a_type * \
a_prefix##first(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##last(a_rbt_type *rbtree) { \
a_type *ret; \
rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
@ -379,7 +379,7 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \
assert(tnode != NULL); \
} \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
@ -404,7 +404,7 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \
assert(tnode != NULL); \
} \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
@ -419,7 +419,7 @@ a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \
ret = rbtn_right_get(a_type, a_field, ret); \
} \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
@ -438,7 +438,7 @@ a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
@ -457,7 +457,7 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
break; \
} \
} \
return (ret); \
return ret; \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
@ -872,16 +872,16 @@ a_attr a_type * \
a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
return (NULL); \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \
a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \
arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@ -893,20 +893,20 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \
if ((ret = a_prefix##iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} else if (cmp > 0) { \
return (a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)); \
return a_prefix##iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg)); \
return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \
a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@ -919,22 +919,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \
} else { \
ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\
} \
return (ret); \
return ret; \
} \
a_attr a_type * \
a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \
if (node == NULL) { \
return (NULL); \
return NULL; \
} else { \
a_type *ret; \
if ((ret = a_prefix##reverse_iter_recurse(rbtree, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@ -947,20 +947,20 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \
if ((ret = a_prefix##reverse_iter_start(rbtree, start, \
rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \
(ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else if (cmp < 0) { \
return (a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_start(rbtree, start, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} else { \
a_type *ret; \
if ((ret = cb(rbtree, node, arg)) != NULL) { \
return (ret); \
return ret; \
} \
return (a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg)); \
return a_prefix##reverse_iter_recurse(rbtree, \
rbtn_left_get(a_type, a_field, node), cb, arg); \
} \
} \
a_attr a_type * \
@ -974,7 +974,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \
cb, arg); \
} \
return (ret); \
return ret; \
} \
a_attr void \
a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \


@ -41,13 +41,13 @@ rtree_start_level(const rtree_t *rtree, uintptr_t key) {
unsigned start_level;
if (unlikely(key == 0)) {
return (rtree->height - 1);
return rtree->height - 1;
}
start_level = rtree->start_level[(lg_floor(key) + 1) >>
LG_RTREE_BITS_PER_LEVEL];
assert(start_level < rtree->height);
return (start_level);
return start_level;
}
JEMALLOC_ALWAYS_INLINE unsigned
@ -67,7 +67,7 @@ rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
start_level = rtree->start_level[(lg_floor(key_diff) + 1) >>
LG_RTREE_BITS_PER_LEVEL];
assert(start_level < rtree->height);
return (start_level);
return start_level;
}
JEMALLOC_ALWAYS_INLINE uintptr_t
@ -92,7 +92,7 @@ rtree_child_tryread(rtree_elm_t *elm, bool dependent) {
child = (rtree_elm_t *)atomic_read_p(&elm->pun);
}
assert(!dependent || child != NULL);
return (child);
return child;
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
@ -105,7 +105,7 @@ rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level,
child = rtree_child_read_hard(tsdn, rtree, elm, level);
}
assert(!dependent || child != NULL);
return (child);
return child;
}
JEMALLOC_ALWAYS_INLINE extent_t *
@ -132,7 +132,7 @@ rtree_elm_read(rtree_elm_t *elm, bool dependent) {
/* Mask the lock bit. */
extent = (extent_t *)((uintptr_t)extent & ~((uintptr_t)0x1));
return (extent);
return extent;
}
JEMALLOC_INLINE void
@ -151,7 +151,7 @@ rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) {
&rtree->levels[level].subtree_pun);
}
assert(!dependent || subtree != NULL);
return (subtree);
return subtree;
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
@ -164,7 +164,7 @@ rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
subtree = rtree_subtree_read_hard(tsdn, rtree, level);
}
assert(!dependent || subtree != NULL);
return (subtree);
return subtree;
}
JEMALLOC_ALWAYS_INLINE rtree_elm_t *
@ -179,7 +179,7 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
if (dependent || init_missing) {
if (likely(rtree_ctx->valid)) {
if (key == rtree_ctx->key) {
return (rtree_ctx->elms[rtree->height]);
return rtree_ctx->elms[rtree->height];
} else {
unsigned no_ctx_start_level =
rtree_start_level(rtree, key);
@ -234,7 +234,7 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
if (init_missing) { \
rtree_ctx->valid = false; \
} \
return (NULL); \
return NULL; \
} \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
@ -253,7 +253,7 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
if (init_missing) { \
rtree_ctx->valid = false; \
} \
return (NULL); \
return NULL; \
} \
subkey = rtree_subkey(rtree, key, level - \
RTREE_GET_BIAS); \
@ -266,7 +266,7 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
rtree_ctx->elms[level - RTREE_GET_BIAS + 1] = \
node; \
} \
return (node);
return node;
#if RTREE_HEIGHT_MAX > 1
RTREE_GET_SUBTREE(0)
#endif
@ -334,12 +334,12 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true);
if (elm == NULL) {
return (true);
return true;
}
assert(rtree_elm_read(elm, false) == NULL);
rtree_elm_write(elm, extent);
return (false);
return false;
}
JEMALLOC_ALWAYS_INLINE extent_t *
@ -349,10 +349,10 @@ rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false);
if (elm == NULL) {
return (NULL);
return NULL;
}
return (rtree_elm_read(elm, dependent));
return rtree_elm_read(elm, dependent);
}
JEMALLOC_INLINE rtree_elm_t *
@ -363,7 +363,7 @@ rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent,
init_missing);
if (!dependent && elm == NULL) {
return (NULL);
return NULL;
}
{
extent_t *extent;
@ -380,7 +380,7 @@ rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
rtree_elm_witness_acquire(tsdn, rtree, key, elm);
}
return (elm);
return elm;
}
JEMALLOC_INLINE extent_t *
@ -395,7 +395,7 @@ rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
rtree_elm_witness_access(tsdn, rtree, elm);
}
return (extent);
return extent;
}
JEMALLOC_INLINE void


@ -44,7 +44,7 @@ tcache_enabled_get(void) {
tsd_tcache_enabled_set(tsd, tcache_enabled);
}
return ((bool)tcache_enabled);
return (bool)tcache_enabled;
}
JEMALLOC_INLINE void
@ -69,19 +69,19 @@ tcache_get(tsd_t *tsd, bool create) {
tcache_t *tcache;
if (!config_tcache) {
return (NULL);
return NULL;
}
tcache = tsd_tcache_get(tsd);
if (!create) {
return (tcache);
return tcache;
}
if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
tcache = tcache_get_hard(tsd);
tsd_tcache_set(tsd, tcache);
}
return (tcache);
return tcache;
}
JEMALLOC_ALWAYS_INLINE void
@ -102,7 +102,7 @@ tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
if (unlikely(tbin->ncached == 0)) {
tbin->low_water = -1;
*tcache_success = false;
return (NULL);
return NULL;
}
/*
* tcache_success (instead of ret) should be checked upon the return of
@ -119,7 +119,7 @@ tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
tbin->low_water = tbin->ncached;
}
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
@ -138,13 +138,13 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool tcache_hard_success;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL)) {
return (NULL);
return NULL;
}
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
tbin, binind, &tcache_hard_success);
if (tcache_hard_success == false) {
return (NULL);
return NULL;
}
}
@ -182,7 +182,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
tcache->prof_accumbytes += usize;
}
tcache_event(tsd, tcache);
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
@ -203,12 +203,12 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
*/
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL)) {
return (NULL);
return NULL;
}
ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
if (ret == NULL) {
return (NULL);
return NULL;
}
} else {
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
@ -242,7 +242,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
tcache_event(tsd, tcache);
return (ret);
return ret;
}
JEMALLOC_ALWAYS_INLINE void
@ -306,7 +306,7 @@ tcaches_get(tsd_t *tsd, unsigned ind) {
elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
NULL));
}
return (elm->tcache);
return elm->tcache;
}
#endif


@ -23,14 +23,14 @@ ticker_copy(ticker_t *ticker, const ticker_t *other) {
JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker) {
return (ticker->tick);
return ticker->tick;
}
JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks) {
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
return (true);
return true;
}
ticker->tick -= nticks;
return(false);
@ -38,7 +38,7 @@ ticker_ticks(ticker_t *ticker, int32_t nticks) {
JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker) {
return (ticker_ticks(ticker, 1));
return ticker_ticks(ticker, 1);
}
#endif


@ -29,7 +29,7 @@ tsd_fetch_impl(bool init) {
tsd_t *tsd = tsd_get(init);
if (!init && tsd_get_allocates() && tsd == NULL) {
return (NULL);
return NULL;
}
assert(tsd != NULL);
@ -46,17 +46,17 @@ tsd_fetch_impl(bool init) {
}
}
return (tsd);
return tsd;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void) {
return (tsd_fetch_impl(true));
return tsd_fetch_impl(true);
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd) {
return ((tsdn_t *)tsd);
return (tsdn_t *)tsd;
}
JEMALLOC_INLINE bool
@ -67,12 +67,12 @@ tsd_nominal(tsd_t *tsd) {
#define O(n, t, c) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \
return (&tsd->n); \
return &tsd->n; \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
return (*tsd_##n##p_get(tsd)); \
return *tsd_##n##p_get(tsd); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
@ -86,22 +86,22 @@ MALLOC_TSD
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void) {
if (!tsd_booted_get()) {
return (NULL);
return NULL;
}
return (tsd_tsdn(tsd_fetch_impl(false)));
return tsd_tsdn(tsd_fetch_impl(false));
}
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn) {
return (tsdn == NULL);
return tsdn == NULL;
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn) {
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
return &tsdn->tsd;
}
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
@ -113,9 +113,9 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
if (unlikely(tsdn_null(tsdn))) {
static const rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
memcpy(fallback, &rtree_ctx, sizeof(rtree_ctx_t));
return (fallback);
return fallback;
}
return (tsd_rtree_ctxp_get(tsdn_tsd(tsdn)));
return tsd_rtree_ctxp_get(tsdn_tsd(tsdn));
}
#endif


@ -180,7 +180,7 @@ a_name##tsd_cleanup_wrapper(void) { \
a_name##tsd_initialized = false; \
a_cleanup(&a_name##tsd_tls); \
} \
return (a_name##tsd_initialized); \
return a_name##tsd_initialized; \
} \
a_attr bool \
a_name##tsd_boot0(void) { \
@ -189,7 +189,7 @@ a_name##tsd_boot0(void) { \
&a_name##tsd_cleanup_wrapper); \
} \
a_name##tsd_booted = true; \
return (false); \
return false; \
} \
a_attr void \
a_name##tsd_boot1(void) { \
@ -197,21 +197,21 @@ a_name##tsd_boot1(void) { \
} \
a_attr bool \
a_name##tsd_boot(void) { \
return (a_name##tsd_boot0()); \
return a_name##tsd_boot0(); \
} \
a_attr bool \
a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
return a_name##tsd_booted; \
} \
a_attr bool \
a_name##tsd_get_allocates(void) { \
return (false); \
return false; \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) { \
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
return &a_name##tsd_tls; \
} \
a_attr void \
a_name##tsd_set(a_type *val) { \
@ -232,11 +232,11 @@ a_name##tsd_boot0(void) { \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
0) { \
return (true); \
return true; \
} \
} \
a_name##tsd_booted = true; \
return (false); \
return false; \
} \
a_attr void \
a_name##tsd_boot1(void) { \
@ -244,21 +244,21 @@ a_name##tsd_boot1(void) { \
} \
a_attr bool \
a_name##tsd_boot(void) { \
return (a_name##tsd_boot0()); \
return a_name##tsd_boot0(); \
} \
a_attr bool \
a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
return a_name##tsd_booted; \
} \
a_attr bool \
a_name##tsd_get_allocates(void) { \
return (false); \
return false; \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) { \
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
return &a_name##tsd_tls; \
} \
a_attr void \
a_name##tsd_set(a_type *val) { \
@ -289,7 +289,7 @@ a_name##tsd_cleanup_wrapper(void) { \
SetLastError(error); \
\
if (wrapper == NULL) { \
return (false); \
return false; \
} \
if (a_cleanup != malloc_tsd_no_cleanup && \
wrapper->initialized) { \
@ -297,11 +297,11 @@ a_name##tsd_cleanup_wrapper(void) { \
a_cleanup(&wrapper->val); \
if (wrapper->initialized) { \
/* Trigger another cleanup round. */ \
return (true); \
return true; \
} \
} \
malloc_tsd_dalloc(wrapper); \
return (false); \
return false; \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) { \
@ -331,13 +331,13 @@ a_name##tsd_wrapper_get(bool init) { \
} \
a_name##tsd_wrapper_set(wrapper); \
} \
return (wrapper); \
return wrapper; \
} \
a_attr bool \
a_name##tsd_boot0(void) { \
a_name##tsd_tsd = TlsAlloc(); \
if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) { \
return (true); \
return true; \
} \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
@ -345,7 +345,7 @@ a_name##tsd_boot0(void) { \
} \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
return false; \
} \
a_attr void \
a_name##tsd_boot1(void) { \
@ -364,18 +364,18 @@ a_name##tsd_boot1(void) { \
a_attr bool \
a_name##tsd_boot(void) { \
if (a_name##tsd_boot0()) { \
return (true); \
return true; \
} \
a_name##tsd_boot1(); \
return (false); \
return false; \
} \
a_attr bool \
a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
return a_name##tsd_booted; \
} \
a_attr bool \
a_name##tsd_get_allocates(void) { \
return (true); \
return true; \
} \
/* Get/set. */ \
a_attr a_type * \
@ -385,9 +385,9 @@ a_name##tsd_get(bool init) { \
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
if (a_name##tsd_get_allocates() && !init && wrapper == NULL) { \
return (NULL); \
return NULL; \
} \
return (&wrapper->val); \
return &wrapper->val; \
} \
a_attr void \
a_name##tsd_set(a_type *val) { \
@ -449,7 +449,7 @@ a_name##tsd_wrapper_get(bool init) { \
tsd_init_check_recursion(&a_name##tsd_init_head, \
&block); \
if (wrapper) { \
return (wrapper); \
return wrapper; \
} \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
@ -465,17 +465,17 @@ a_name##tsd_wrapper_get(bool init) { \
a_name##tsd_wrapper_set(wrapper); \
tsd_init_finish(&a_name##tsd_init_head, &block); \
} \
return (wrapper); \
return wrapper; \
} \
a_attr bool \
a_name##tsd_boot0(void) { \
if (pthread_key_create(&a_name##tsd_tsd, \
a_name##tsd_cleanup_wrapper) != 0) { \
return (true); \
return true; \
} \
a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
a_name##tsd_booted = true; \
return (false); \
return false; \
} \
a_attr void \
a_name##tsd_boot1(void) { \
@ -494,18 +494,18 @@ a_name##tsd_boot1(void) { \
a_attr bool \
a_name##tsd_boot(void) { \
if (a_name##tsd_boot0()) { \
return (true); \
return true; \
} \
a_name##tsd_boot1(); \
return (false); \
return false; \
} \
a_attr bool \
a_name##tsd_booted_get(void) { \
return (a_name##tsd_booted); \
return a_name##tsd_booted; \
} \
a_attr bool \
a_name##tsd_get_allocates(void) { \
return (true); \
return true; \
} \
/* Get/set. */ \
a_attr a_type * \
@ -515,9 +515,9 @@ a_name##tsd_get(bool init) { \
assert(a_name##tsd_booted); \
wrapper = a_name##tsd_wrapper_get(init); \
if (a_name##tsd_get_allocates() && !init && wrapper == NULL) { \
return (NULL); \
return NULL; \
} \
return (&wrapper->val); \
return &wrapper->val; \
} \
a_attr void \
a_name##tsd_set(a_type *val) { \


@ -26,27 +26,27 @@ int get_errno(void);
JEMALLOC_ALWAYS_INLINE unsigned
ffs_llu(unsigned long long bitmap) {
return (JEMALLOC_INTERNAL_FFSLL(bitmap));
return JEMALLOC_INTERNAL_FFSLL(bitmap);
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_lu(unsigned long bitmap) {
return (JEMALLOC_INTERNAL_FFSL(bitmap));
return JEMALLOC_INTERNAL_FFSL(bitmap);
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u(unsigned bitmap) {
return (JEMALLOC_INTERNAL_FFS(bitmap));
return JEMALLOC_INTERNAL_FFS(bitmap);
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return (ffs_u(bitmap));
return ffs_u(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
return (ffs_lu(bitmap));
return ffs_lu(bitmap);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
return (ffs_llu(bitmap));
return ffs_llu(bitmap);
#else
#error No implementation for size_t ffs()
#endif
@ -55,9 +55,9 @@ ffs_zu(size_t bitmap) {
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
return (ffs_lu(bitmap));
return ffs_lu(bitmap);
#elif LG_SIZEOF_LONG_LONG == 3
return (ffs_llu(bitmap));
return ffs_llu(bitmap);
#else
#error No implementation for 64-bit ffs()
#endif
@ -66,11 +66,11 @@ ffs_u64(uint64_t bitmap) {
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
return (ffs_u(bitmap));
return ffs_u(bitmap);
#else
#error No implementation for 32-bit ffs()
#endif
return (ffs_u(bitmap));
return ffs_u(bitmap);
}
JEMALLOC_INLINE uint64_t
@ -83,7 +83,7 @@ pow2_ceil_u64(uint64_t x) {
x |= x >> 16;
x |= x >> 32;
x++;
return (x);
return x;
}
JEMALLOC_INLINE uint32_t
@ -95,16 +95,16 @@ pow2_ceil_u32(uint32_t x) {
x |= x >> 8;
x |= x >> 16;
x++;
return (x);
return x;
}
/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return (pow2_ceil_u64(x));
return pow2_ceil_u64(x);
#else
return (pow2_ceil_u32(x));
return pow2_ceil_u32(x);
#endif
}
@ -120,7 +120,7 @@ lg_floor(size_t x) {
: "r"(x) // Inputs.
);
assert(ret < UINT_MAX);
return ((unsigned)ret);
return (unsigned)ret;
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE unsigned
@ -137,7 +137,7 @@ lg_floor(size_t x) {
# error "Unsupported type size for lg_floor()"
#endif
assert(ret < UINT_MAX);
return ((unsigned)ret);
return (unsigned)ret;
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE unsigned
@ -145,9 +145,9 @@ lg_floor(size_t x) {
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x));
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x));
return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
#else
# error "Unsupported type size for lg_floor()"
#endif
@ -166,10 +166,10 @@ lg_floor(size_t x) {
x |= (x >> 32);
#endif
if (x == SIZE_T_MAX) {
return ((8 << LG_SIZEOF_PTR) - 1);
return (8 << LG_SIZEOF_PTR) - 1;
}
x++;
return (ffs_zu(x) - 2);
return ffs_zu(x) - 2;
}
#endif
@ -187,9 +187,9 @@ set_errno(int errnum) {
JEMALLOC_INLINE int
get_errno(void) {
#ifdef _WIN32
return (GetLastError());
return GetLastError();
#else
return (errno);
return errno;
#endif
}
#endif

View File

@ -22,11 +22,11 @@ witness_owner(tsd_t *tsd, const witness_t *witness) {
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness) {
return (true);
return true;
}
}
return (false);
return false;
}
JEMALLOC_INLINE void

View File

@ -10,13 +10,13 @@ static __forceinline int ffsl(long x) {
unsigned long i;
if (_BitScanForward(&i, x)) {
return (i + 1);
return i + 1;
}
return (0);
return 0;
}
static __forceinline int ffs(int x) {
return (ffsl(x));
return ffsl(x);
}
# ifdef _M_X64
@ -27,9 +27,9 @@ static __forceinline int ffsll(unsigned __int64 x) {
unsigned long i;
#ifdef _M_X64
if (_BitScanForward64(&i, x)) {
return (i + 1);
return i + 1;
}
return (0);
return 0;
#else
// Fallback for 32-bit build where 64-bit version not available
// assuming little endian
@ -41,11 +41,11 @@ static __forceinline int ffsll(unsigned __int64 x) {
s.ll = x;
if (_BitScanForward(&i, s.l[0])) {
return (i + 1);
return i + 1;
} else if(_BitScanForward(&i, s.l[1])) {
return (i + 33);
return i + 33;
}
return (0);
return 0;
#endif
}

View File

@ -50,8 +50,8 @@ arena_extent_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_assert_owner(tsdn, &arena->lock);
return (extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
pad, alignment, zero, &commit, slab));
return extent_alloc_cache(tsdn, arena, r_extent_hooks, new_addr, usize,
pad, alignment, zero, &commit, slab);
}
extent_t *
@ -65,7 +65,7 @@ arena_extent_cache_alloc(tsdn_t *tsdn, arena_t *arena,
new_addr, size, 0, alignment, zero, false);
malloc_mutex_unlock(tsdn, &arena->lock);
return (extent);
return extent;
}
static void
@ -122,7 +122,7 @@ arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
ret = (void *)((uintptr_t)extent_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
slab_data->nfree--;
return (ret);
return ret;
}
#ifndef JEMALLOC_JET
@ -160,7 +160,7 @@ arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
assert(regind < arena_bin_info[binind].nregs);
return (regind);
return regind;
}
JEMALLOC_INLINE_C void
@ -282,7 +282,7 @@ arena_extent_alloc_large_hard(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_unlock(tsdn, &arena->lock);
}
return (extent);
return extent;
}
extent_t *
@ -308,7 +308,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
&extent_hooks, usize, alignment, zero);
}
return (extent);
return extent;
}
void
@ -409,7 +409,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena) {
}
npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
return npages_limit_backlog;
}
static void
@ -499,12 +499,12 @@ arena_decay_init(arena_t *arena, ssize_t decay_time) {
static bool
arena_decay_time_valid(ssize_t decay_time) {
if (decay_time < -1) {
return (false);
return false;
}
if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX) {
return (true);
return true;
}
return (false);
return false;
}
ssize_t
@ -515,13 +515,13 @@ arena_decay_time_get(tsdn_t *tsdn, arena_t *arena) {
decay_time = arena->decay.time;
malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
return decay_time;
}
bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
if (!arena_decay_time_valid(decay_time)) {
return (true);
return true;
}
malloc_mutex_lock(tsdn, &arena->lock);
@ -537,7 +537,7 @@ arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time) {
arena_maybe_purge(tsdn, arena);
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
return false;
}
static void
@ -609,7 +609,7 @@ arena_dirty_count(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return (ndirty);
return ndirty;
}
static size_t
@ -648,7 +648,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
}
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
return (nstashed);
return nstashed;
}
static size_t
@ -680,7 +680,7 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena,
arena->stats.purged += npurged;
}
return (npurged);
return npurged;
}
/*
@ -757,12 +757,12 @@ static extent_t *
arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) {
return (NULL);
return NULL;
}
if (config_stats) {
bin->stats.reslabs++;
}
return (slab);
return slab;
}
static void
@ -936,7 +936,7 @@ arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
bin_info->slab_size, 0, PAGE, &zero, &commit, true);
malloc_mutex_lock(tsdn, &arena->lock);
return (slab);
return slab;
}
static extent_t *
@ -953,7 +953,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
bin_info);
if (slab == NULL) {
return (NULL);
return NULL;
}
}
assert(extent_slab_get(slab));
@ -970,7 +970,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
arena->stats.mapped += extent_size_get(slab);
}
return (slab);
return slab;
}
static extent_t *
@ -982,7 +982,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
/* Look for a usable slab. */
slab = arena_bin_slabs_nonfull_tryget(bin);
if (slab != NULL) {
return (slab);
return slab;
}
/* No existing slabs have any space available. */
@ -1001,7 +1001,7 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
bin->stats.nslabs++;
bin->stats.curslabs++;
}
return (slab);
return slab;
}
/*
@ -1011,10 +1011,10 @@ arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
*/
slab = arena_bin_slabs_nonfull_tryget(bin);
if (slab != NULL) {
return (slab);
return slab;
}
return (NULL);
return NULL;
}
/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
@ -1057,7 +1057,7 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
bin);
}
}
return (ret);
return ret;
}
arena_bin_slabs_full_insert(bin, bin->slabcur);
@ -1065,13 +1065,13 @@ arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
}
if (slab == NULL) {
return (NULL);
return NULL;
}
bin->slabcur = slab;
assert(extent_slab_data_get(bin->slabcur)->nfree > 0);
return (arena_slab_reg_alloc(tsdn, slab, bin_info));
return arena_slab_reg_alloc(tsdn, slab, bin_info);
}
void
@ -1172,7 +1172,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock);
return (NULL);
return NULL;
}
if (config_stats) {
@ -1203,7 +1203,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
}
arena_decay_tick(tsdn, arena);
return (ret);
return ret;
}
void *
@ -1215,13 +1215,13 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
arena = arena_choose(tsdn_tsd(tsdn), arena);
}
if (unlikely(arena == NULL)) {
return (NULL);
return NULL;
}
if (likely(size <= SMALL_MAXCLASS)) {
return (arena_malloc_small(tsdn, arena, ind, zero));
return arena_malloc_small(tsdn, arena, ind, zero);
}
return (large_malloc(tsdn, arena, index2size(ind), zero));
return large_malloc(tsdn, arena, index2size(ind), zero);
}
void *
@ -1241,7 +1241,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
ret = large_palloc(tsdn, arena, usize, alignment, zero);
}
}
return (ret);
return ret;
}
void
@ -1282,7 +1282,7 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
assert(isalloc(tsdn, extent, ptr) == LARGE_MINCLASS);
return (LARGE_MINCLASS);
return LARGE_MINCLASS;
}
void
@ -1425,7 +1425,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
if (unlikely(size > LARGE_MAXCLASS)) {
return (true);
return true;
}
usize_min = s2u(size);
@ -1440,31 +1440,31 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
size2index(oldsize)) && (size > oldsize || usize_max <
oldsize)) {
return (true);
return true;
}
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
return false;
} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
return (large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
zero));
return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
zero);
}
return (true);
return true;
}
static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) {
if (alignment == 0) {
return (arena_malloc(tsdn, arena, usize, size2index(usize),
zero, tcache, true));
return arena_malloc(tsdn, arena, usize, size2index(usize),
zero, tcache, true);
}
usize = sa2u(usize, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return (NULL);
return NULL;
}
return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
void *
@ -1476,20 +1476,20 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
usize = s2u(size);
if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
return (NULL);
return NULL;
}
if (likely(usize <= SMALL_MAXCLASS)) {
/* Try to avoid moving the allocation. */
if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
zero)) {
return (ptr);
return ptr;
}
}
if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
return (large_ralloc(tsdn, arena, extent, usize, alignment,
zero, tcache));
return large_ralloc(tsdn, arena, extent, usize, alignment,
zero, tcache);
}
/*
@ -1499,7 +1499,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero,
tcache);
if (ret == NULL) {
return (NULL);
return NULL;
}
/*
@ -1510,7 +1510,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
return (ret);
return ret;
}
dss_prec_t
@ -1520,7 +1520,7 @@ arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) {
malloc_mutex_lock(tsdn, &arena->lock);
ret = arena->dss_prec;
malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
return ret;
}
bool
@ -1531,21 +1531,21 @@ arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) {
malloc_mutex_lock(tsdn, &arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
return false;
}
ssize_t
arena_decay_time_default_get(void) {
return ((ssize_t)atomic_read_zu((size_t *)&decay_time_default));
return (ssize_t)atomic_read_zu((size_t *)&decay_time_default);
}
bool
arena_decay_time_default_set(ssize_t decay_time) {
if (!arena_decay_time_valid(decay_time)) {
return (true);
return true;
}
atomic_write_zu((size_t *)&decay_time_default, (size_t)decay_time);
return (false);
return false;
}
static void
@ -1642,7 +1642,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
return (atomic_read_u(&arena->nthreads[internal]));
return atomic_read_u(&arena->nthreads[internal]);
}
void
@ -1657,7 +1657,7 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
size_t
arena_extent_sn_next(arena_t *arena) {
return (atomic_add_zu(&arena->extent_sn_next, 1) - 1);
return atomic_add_zu(&arena->extent_sn_next, 1) - 1;
}
arena_t *
@ -1671,7 +1671,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
} else {
base = base_new(tsdn, ind, extent_hooks);
if (base == NULL) {
return (NULL);
return NULL;
}
}
@ -1762,12 +1762,12 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena->base = base;
return (arena);
return arena;
label_error:
if (ind != 0) {
base_delete(base);
}
return (NULL);
return NULL;
}
void

View File

@ -23,7 +23,7 @@ base_map(extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
&zero, &commit, ind);
}
return (addr);
return addr;
}
static void
@ -105,7 +105,7 @@ base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
extent_init(extent, NULL, (void *)((uintptr_t)extent_addr_get(extent) +
*gap_size + size), extent_size_get(extent) - *gap_size - size, 0,
extent_sn_get(extent), true, true, true, false);
return (ret);
return ret;
}
static void
@ -142,7 +142,7 @@ base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
return (ret);
return ret;
}
/*
@ -163,14 +163,14 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
block_size = HUGEPAGE_CEILING(header_size + gap_size + usize);
block = (base_block_t *)base_map(extent_hooks, ind, block_size);
if (block == NULL) {
return (NULL);
return NULL;
}
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
base_extent_init(extent_sn_next, &block->extent,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return (block);
return block;
}
/*
@ -187,7 +187,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
block = base_block_alloc(extent_hooks, base_ind_get(base),
&base->extent_sn_next, size, alignment);
if (block == NULL) {
return (NULL);
return NULL;
}
block->next = base->blocks;
base->blocks = block;
@ -198,12 +198,12 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
}
return (&block->extent);
return &block->extent;
}
base_t *
b0get(void) {
return (b0);
return b0;
}
base_t *
@ -217,7 +217,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
block = base_block_alloc(extent_hooks, ind, &extent_sn_next,
sizeof(base_t), QUANTUM);
if (block == NULL) {
return (NULL);
return NULL;
}
base_alignment = CACHELINE;
@ -228,7 +228,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->extent_hooks = extent_hooks;
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE)) {
base_unmap(extent_hooks, ind, block, block->size);
return (NULL);
return NULL;
}
base->extent_sn_next = extent_sn_next;
base->blocks = block;
@ -245,7 +245,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
base_size);
return (base);
return base;
}
void
@ -262,7 +262,7 @@ base_delete(base_t *base) {
extent_hooks_t *
base_extent_hooks_get(base_t *base) {
return ((extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun));
return (extent_hooks_t *)atomic_read_p(&base->extent_hooks_pun);
}
extent_hooks_t *
@ -276,7 +276,7 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
u.h = &base->extent_hooks;
atomic_write_p(u.v, extent_hooks);
return (old_extent_hooks);
return old_extent_hooks;
}
/*
@ -319,7 +319,7 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return (ret);
return ret;
}
void

View File

@ -35,7 +35,7 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return (binfo->levels[binfo->nlevels].group_offset);
return binfo->levels[binfo->nlevels].group_offset;
}
void
@ -80,7 +80,7 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
static size_t
bitmap_info_ngroups(const bitmap_info_t *binfo) {
return (binfo->ngroups);
return binfo->ngroups;
}
void

View File

@ -57,11 +57,11 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
return ((bucket << LG_CKH_BUCKET_CELLS) + i);
return (bucket << LG_CKH_BUCKET_CELLS) + i;
}
}
return (SIZE_T_MAX);
return SIZE_T_MAX;
}
/*
@ -79,13 +79,13 @@ ckh_isearch(ckh_t *ckh, const void *key) {
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX) {
return (cell);
return cell;
}
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
return cell;
}
JEMALLOC_INLINE_C bool
@ -107,11 +107,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
cell->key = key;
cell->data = data;
ckh->count++;
return (false);
return false;
}
}
return (true);
return true;
}
/*
@ -181,12 +181,12 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
return (true);
return true;
}
bucket = tbucket;
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false);
return false;
}
}
}
@ -202,19 +202,19 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false);
return false;
}
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
return (false);
return false;
}
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
@ -234,13 +234,13 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
return (true);
return true;
}
nins++;
}
}
return (false);
return false;
}
static bool
@ -296,7 +296,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
ret = false;
label_return:
return (ret);
return ret;
}
static void
@ -403,7 +403,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = false;
label_return:
return (ret);
return ret;
}
void
@ -433,7 +433,7 @@ size_t
ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
return (ckh->count);
return ckh->count;
}
bool
@ -450,11 +450,11 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
*data = (void *)ckh->tab[i].data;
}
*tabind = i + 1;
return (false);
return false;
}
}
return (true);
return true;
}
bool
@ -477,7 +477,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
ret = false;
label_return:
return (ret);
return ret;
}
bool
@ -507,10 +507,10 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
ckh_shrink(tsd, ckh);
}
return (false);
return false;
}
return (true);
return true;
}
bool
@ -527,10 +527,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
}
return (false);
return false;
}
return (true);
return true;
}
void
@ -543,7 +543,7 @@ ckh_string_keycomp(const void *k1, const void *k2) {
assert(k1 != NULL);
assert(k2 != NULL);
return (strcmp((char *)k1, (char *)k2) ? false : true);
return !strcmp((char *)k1, (char *)k2);
}
void
@ -560,5 +560,5 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) {
bool
ckh_pointer_keycomp(const void *k1, const void *k2) {
return ((k1 == k2) ? true : false);
return (k1 == k2);
}

src/ctl.c
View File

@ -464,12 +464,12 @@ arenas_i2a_impl(size_t i, bool compat, bool validate) {
break;
}
return (a);
return a;
}
static unsigned
arenas_i2a(size_t i) {
return (arenas_i2a_impl(i, true, false));
return arenas_i2a_impl(i, true, false);
}
static ctl_arena_t *
@ -505,14 +505,14 @@ arenas_i_impl(tsdn_t *tsdn, size_t i, bool compat, bool init) {
}
assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
return (ret);
return ret;
}
static ctl_arena_t *
arenas_i(size_t i) {
ctl_arena_t *ret = arenas_i_impl(TSDN_NULL, i, true, false);
assert(ret != NULL);
return (ret);
return ret;
}
static void
@ -692,19 +692,19 @@ ctl_arena_init(tsdn_t *tsdn, extent_hooks_t *extent_hooks) {
/* Trigger stats allocation. */
if (arenas_i_impl(tsdn, arena_ind, false, true) == NULL) {
return (UINT_MAX);
return UINT_MAX;
}
/* Initialize new arena. */
if (arena_init(tsdn, arena_ind, extent_hooks) == NULL) {
return (UINT_MAX);
return UINT_MAX;
}
if (arena_ind == ctl_arenas->narenas) {
ctl_arenas->narenas++;
}
return (arena_ind);
return arena_ind;
}
static void
@ -819,7 +819,7 @@ ctl_init(tsdn_t *tsdn) {
ret = false;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
return ret;
}
static int
@ -917,7 +917,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = 0;
label_return:
return (ret);
return ret;
}
int
@ -1019,12 +1019,12 @@ label_return:
bool
ctl_boot(void) {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL)) {
return (true);
return true;
}
ctl_initialized = false;
return (false);
return false;
}
void
@ -1110,7 +1110,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
t oldval; \
\
if (!(c)) { \
return (ENOENT); \
return ENOENT; \
} \
if (l) { \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
@ -1124,7 +1124,7 @@ label_return: \
if (l) { \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
} \
return (ret); \
return ret; \
}
#define CTL_RO_CGEN(c, n, v, t) \
@ -1135,7 +1135,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
t oldval; \
\
if (!(c)) { \
return (ENOENT); \
return ENOENT; \
} \
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
@ -1145,7 +1145,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
return ret; \
}
#define CTL_RO_GEN(n, v, t) \
@ -1163,7 +1163,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
ret = 0; \
label_return: \
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
return ret; \
}
/*
@ -1178,7 +1178,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
t oldval; \
\
if (!(c)) { \
return (ENOENT); \
return ENOENT; \
} \
READONLY(); \
oldval = (v); \
@ -1186,7 +1186,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
ret = 0; \
label_return: \
return (ret); \
return ret; \
}
#define CTL_RO_NL_GEN(n, v, t) \
@ -1202,7 +1202,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
ret = 0; \
label_return: \
return (ret); \
return ret; \
}
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
@ -1213,7 +1213,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
t oldval; \
\
if (!(c)) { \
return (ENOENT); \
return ENOENT; \
} \
READONLY(); \
oldval = (m(tsd)); \
@ -1221,7 +1221,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
ret = 0; \
label_return: \
return (ret); \
return ret; \
}
#define CTL_RO_CONFIG_GEN(n, t) \
@ -1237,7 +1237,7 @@ n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
\
ret = 0; \
label_return: \
return (ret); \
return ret; \
}
/******************************************************************************/
@ -1260,7 +1260,7 @@ epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
/******************************************************************************/
@ -1316,7 +1316,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL) {
return (EAGAIN);
return EAGAIN;
}
newind = oldind = arena_ind_get(oldarena);
@ -1350,7 +1350,7 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
@ -1369,7 +1369,7 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_tcache) {
return (ENOENT);
return ENOENT;
}
oldval = tcache_enabled_get();
@ -1384,7 +1384,7 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1393,7 +1393,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
int ret;
if (!config_tcache) {
return (ENOENT);
return ENOENT;
}
READONLY();
@ -1403,7 +1403,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1412,7 +1412,7 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
READ_XOR_WRITE();
@ -1434,7 +1434,7 @@ thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1444,7 +1444,7 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
bool oldval;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
oldval = prof_thread_active_get(tsd);
@ -1462,7 +1462,7 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
/******************************************************************************/
@ -1474,7 +1474,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
unsigned tcache_ind;
if (!config_tcache) {
return (ENOENT);
return ENOENT;
}
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
@ -1488,7 +1488,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
static int
@ -1498,7 +1498,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
unsigned tcache_ind;
if (!config_tcache) {
return (ENOENT);
return ENOENT;
}
WRITEONLY();
@ -1512,7 +1512,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1522,7 +1522,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
unsigned tcache_ind;
if (!config_tcache) {
return (ENOENT);
return ENOENT;
}
WRITEONLY();
@ -1536,7 +1536,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
/******************************************************************************/
@ -1560,7 +1560,7 @@ arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
return (ret);
return ret;
}
static void
@ -1622,7 +1622,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1638,7 +1638,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1664,7 +1664,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1677,12 +1677,12 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
return (ret);
return ret;
}
arena_reset(tsd, arena);
return (ret);
return ret;
}
static int
@ -1721,7 +1721,7 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(ret == 0);
label_return:
return (ret);
return ret;
}
static int
@ -1782,7 +1782,7 @@ arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
static int
@ -1817,7 +1817,7 @@ arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -1851,7 +1851,7 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
static const ctl_named_node_t *
@ -1874,7 +1874,7 @@ arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
ret = super_arena_i_node;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
return ret;
}
/******************************************************************************/
@ -1897,7 +1897,7 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
static int
@ -1922,7 +1922,7 @@ arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
@ -1936,9 +1936,9 @@ CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
if (i > NBINS) {
return (NULL);
return NULL;
}
return (super_arenas_bin_i_node);
return super_arenas_bin_i_node;
}
CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
@ -1947,9 +1947,9 @@ static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {
if (i > NSIZES - NBINS) {
return (NULL);
return NULL;
}
return (super_arenas_lextent_i_node);
return super_arenas_lextent_i_node;
}
static int
@ -1973,7 +1973,7 @@ arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
/******************************************************************************/
@ -1985,7 +1985,7 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
if (newp != NULL) {
@ -2002,7 +2002,7 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -2012,7 +2012,7 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
bool oldval;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
if (newp != NULL) {
@ -2028,7 +2028,7 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -2038,7 +2038,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const char *filename = NULL;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
WRITEONLY();
@ -2051,7 +2051,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -2061,7 +2061,7 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
bool oldval;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
if (newp != NULL) {
@ -2077,7 +2077,7 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
static int
@ -2087,7 +2087,7 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t lg_sample = lg_prof_sample;
if (!config_prof) {
return (ENOENT);
return ENOENT;
}
WRITEONLY();
@ -2100,7 +2100,7 @@ prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
return (ret);
return ret;
}
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
@ -2180,9 +2180,9 @@ static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j) {
if (j > NBINS) {
return (NULL);
return NULL;
}
return (super_stats_arenas_i_bins_j_node);
return super_stats_arenas_i_bins_j_node;
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
@ -2198,9 +2198,9 @@ static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t j) {
if (j > NSIZES - NBINS) {
return (NULL);
return NULL;
}
return (super_stats_arenas_i_lextents_j_node);
return super_stats_arenas_i_lextents_j_node;
}
static const ctl_named_node_t *
@ -2218,5 +2218,5 @@ stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
ret = super_stats_arenas_i_node;
label_return:
malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
return ret;
}

View File

@ -82,12 +82,12 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) {
extent = ql_last(&arena->extent_cache, ql_link);
if (extent == NULL) {
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
return (base_alloc(tsdn, arena->base, sizeof(extent_t),
QUANTUM));
return base_alloc(tsdn, arena->base, sizeof(extent_t),
QUANTUM);
}
ql_tail_remove(&arena->extent_cache, extent_t, ql_link);
malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
return (extent);
return extent;
}
void
@ -100,12 +100,12 @@ extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
extent_hooks_t *
extent_hooks_get(arena_t *arena) {
return (base_extent_hooks_get(arena->base));
return base_extent_hooks_get(arena->base);
}
extent_hooks_t *
extent_hooks_set(arena_t *arena, extent_hooks_t *extent_hooks) {
return (base_extent_hooks_set(arena->base, extent_hooks));
return base_extent_hooks_set(arena->base, extent_hooks);
}
static void
@ -139,11 +139,11 @@ extent_size_quantize_floor(size_t size) {
* PAGE-spaced size classes, but it's simplest to just handle
* the one case that would cause erroneous results.
*/
return (size);
return size;
}
ret = pind2sz(pind - 1) + large_pad;
assert(ret <= size);
return (ret);
return ret;
}
#ifdef JEMALLOC_JET
#undef extent_size_quantize_floor
@ -176,7 +176,7 @@ extent_size_quantize_ceil(size_t size) {
*/
ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
}
return (ret);
return ret;
}
#ifdef JEMALLOC_JET
#undef extent_size_quantize_ceil
@ -217,7 +217,7 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
*r_elm_a = rtree_elm_acquire(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_base_get(extent), dependent, init_missing);
if (!dependent && *r_elm_a == NULL) {
return (true);
return true;
}
assert(*r_elm_a != NULL);
@ -227,14 +227,14 @@ extent_rtree_acquire(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
init_missing);
if (!dependent && *r_elm_b == NULL) {
rtree_elm_release(tsdn, &extents_rtree, *r_elm_a);
return (true);
return true;
}
assert(*r_elm_b != NULL);
} else {
*r_elm_b = NULL;
}
return (false);
return false;
}
static void
@ -308,7 +308,7 @@ extent_register(tsdn_t *tsdn, const extent_t *extent) {
if (extent_rtree_acquire(tsdn, rtree_ctx, extent, false, true, &elm_a,
&elm_b)) {
return (true);
return true;
}
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
if (extent_slab_get(extent)) {
@ -320,7 +320,7 @@ extent_register(tsdn_t *tsdn, const extent_t *extent) {
extent_gprof_add(tsdn, extent);
}
return (false);
return false;
}
static void
@ -378,11 +378,11 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
for (i = pind; i < NPSIZES+1; i++) {
extent_t *extent = extent_heap_first(&extent_heaps[i]);
if (extent != NULL) {
return (extent);
return extent;
}
}
return (NULL);
return NULL;
}
static void
@ -444,7 +444,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
alloc_size = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < usize) {
return (NULL);
return NULL;
}
if (!locked) {
malloc_mutex_lock(tsdn, &arena->extents_mtx);
@ -479,7 +479,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
}
return (NULL);
return NULL;
}
extent_heaps_remove(tsdn, extent_heaps, extent);
arena_extent_cache_maybe_remove(tsdn, arena, extent, cache);
@ -508,7 +508,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
}
return (NULL);
return NULL;
}
extent_heaps_insert(tsdn, extent_heaps, lead);
arena_extent_cache_maybe_insert(tsdn, arena, lead, cache);
@ -525,7 +525,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!locked) {
malloc_mutex_unlock(tsdn, &arena->extents_mtx);
}
return (NULL);
return NULL;
}
extent_heaps_insert(tsdn, extent_heaps, trail);
arena_extent_cache_maybe_insert(tsdn, arena, trail, cache);
@ -545,7 +545,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
}
extent_record(tsdn, arena, r_extent_hooks, extent_heaps,
cache, extent);
return (NULL);
return NULL;
}
extent_zeroed_set(extent, true);
}
@ -577,7 +577,7 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
}
}
}
return (extent);
return extent;
}
/*
@ -598,22 +598,22 @@ extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
if (have_dss && dss_prec == dss_prec_primary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return (ret);
return ret;
}
/* mmap. */
if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
!= NULL) {
return (ret);
return ret;
}
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
commit)) != NULL) {
return (ret);
return ret;
}
/* All strategies for allocation failed. */
return (NULL);
return NULL;
}
static extent_t *
@ -628,7 +628,7 @@ extent_alloc_cache_impl(tsdn_t *tsdn, arena_t *arena,
extent = extent_recycle(tsdn, arena, r_extent_hooks,
arena->extents_cached, locked, true, new_addr, usize, pad,
alignment, zero, commit, slab);
return (extent);
return extent;
}
extent_t *
@ -637,16 +637,16 @@ extent_alloc_cache_locked(tsdn_t *tsdn, arena_t *arena,
size_t alignment, bool *zero, bool *commit, bool slab) {
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
new_addr, usize, pad, alignment, zero, commit, slab));
return extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, true,
new_addr, usize, pad, alignment, zero, commit, slab);
}
extent_t *
extent_alloc_cache(tsdn_t *tsdn, arena_t *arena,
extent_hooks_t **r_extent_hooks, void *new_addr, size_t usize, size_t pad,
size_t alignment, bool *zero, bool *commit, bool slab) {
return (extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
new_addr, usize, pad, alignment, zero, commit, slab));
return extent_alloc_cache_impl(tsdn, arena, r_extent_hooks, false,
new_addr, usize, pad, alignment, zero, commit, slab);
}
static void *
@ -656,7 +656,7 @@ extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
return (ret);
return ret;
}
static void *
@ -675,8 +675,8 @@ extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
*/
assert(arena != NULL);
return (extent_alloc_default_impl(tsdn, arena, new_addr, size,
alignment, zero, commit));
return extent_alloc_default_impl(tsdn, arena, new_addr, size,
alignment, zero, commit);
}
static void
@ -714,14 +714,14 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size_min < usize) {
return (NULL);
return NULL;
}
if (alloc_size < alloc_size_min) {
return (NULL);
return NULL;
}
extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return (NULL);
return NULL;
}
zeroed = false;
committed = false;
@ -731,7 +731,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
arena_extent_sn_next(arena), false, zeroed, committed, false);
if (ptr == NULL || extent_register(tsdn, extent)) {
extent_dalloc(tsdn, arena, extent);
return (NULL);
return NULL;
}
/*
* Set the extent as active *after registration so that no gprof-related
@ -759,7 +759,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (extent == NULL) {
extent_deregister(tsdn, lead);
extent_leak(tsdn, arena, r_extent_hooks, false, lead);
return (NULL);
return NULL;
}
extent_retain(tsdn, arena, r_extent_hooks, lead);
}
@ -771,7 +771,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (trail == NULL) {
extent_deregister(tsdn, extent);
extent_leak(tsdn, arena, r_extent_hooks, false, extent);
return (NULL);
return NULL;
}
extent_retain(tsdn, arena, r_extent_hooks, trail);
} else if (leadsize == 0) {
@ -786,7 +786,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (extent_commit_wrapper(tsdn, arena, r_extent_hooks, extent,
0, extent_size_get(extent))) {
extent_retain(tsdn, arena, r_extent_hooks, extent);
return (NULL);
return NULL;
}
extent_zeroed_set(extent, true);
}
@ -812,7 +812,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (arena->extent_grow_next + 1 < NPSIZES) {
arena->extent_grow_next++;
}
return (extent);
return extent;
}
static extent_t *
@ -841,7 +841,7 @@ extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
new_addr, usize, pad, alignment, zero, commit, slab);
}
return (extent);
return extent;
}
static extent_t *
@ -855,7 +855,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
size = usize + pad;
extent = extent_alloc(tsdn, arena);
if (extent == NULL) {
return (NULL);
return NULL;
}
if (*r_extent_hooks == &extent_hooks_default) {
/* Call directly to propagate tsdn. */
@ -867,7 +867,7 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
}
if (addr == NULL) {
extent_dalloc(tsdn, arena, extent);
return (NULL);
return NULL;
}
extent_init(extent, arena, addr, size, usize,
arena_extent_sn_next(arena), true, zero, commit, slab);
@ -876,10 +876,10 @@ extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
}
if (extent_register(tsdn, extent)) {
extent_leak(tsdn, arena, r_extent_hooks, false, extent);
return (NULL);
return NULL;
}
return (extent);
return extent;
}
extent_t *
@ -897,25 +897,25 @@ extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
new_addr, usize, pad, alignment, zero, commit, slab);
}
return (extent);
return extent;
}
static bool
extent_can_coalesce(const extent_t *a, const extent_t *b) {
if (extent_arena_get(a) != extent_arena_get(b)) {
return (false);
return false;
}
if (extent_active_get(a) != extent_active_get(b)) {
return (false);
return false;
}
if (extent_committed_get(a) != extent_committed_get(b)) {
return (false);
return false;
}
if (extent_retained_get(a) != extent_retained_get(b)) {
return (false);
return false;
}
return (true);
return true;
}
static void
@ -1016,9 +1016,9 @@ extent_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
static bool
extent_dalloc_default_impl(void *addr, size_t size) {
if (!have_dss || !extent_in_dss(addr)) {
return (extent_dalloc_mmap(addr, size));
return extent_dalloc_mmap(addr, size);
}
return (true);
return true;
}
static bool
@ -1026,7 +1026,7 @@ extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
bool committed, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (extent_dalloc_default_impl(addr, size));
return extent_dalloc_default_impl(addr, size);
}
bool
@ -1060,7 +1060,7 @@ extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
extent_dalloc(tsdn, arena, extent);
}
return (err);
return err;
}
void
@ -1110,8 +1110,8 @@ extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length));
return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
bool
@ -1125,7 +1125,7 @@ extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
(*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
extent_size_get(extent), offset, length, arena_ind_get(arena)));
extent_committed_set(extent, extent_committed_get(extent) || !err);
return (err);
return err;
}
static bool
@ -1133,8 +1133,8 @@ extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t offset, size_t length, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length));
return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
bool
@ -1150,7 +1150,7 @@ extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_base_get(extent), extent_size_get(extent), offset, length,
arena_ind_get(arena)));
extent_committed_set(extent, extent_committed_get(extent) && err);
return (err);
return err;
}
#ifdef PAGES_CAN_PURGE_LAZY
@ -1163,8 +1163,8 @@ extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return (pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length));
return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
length);
}
#endif
@ -1189,8 +1189,8 @@ extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
assert(length != 0);
assert((length & PAGE_MASK) == 0);
return (pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length));
return pages_purge_forced((void *)((uintptr_t)addr +
(uintptr_t)offset), length);
}
#endif
@ -1211,10 +1211,7 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
if (!maps_coalesce) {
return (true);
}
return (false);
return !maps_coalesce;
}
#endif
@ -1232,7 +1229,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->split == NULL) {
return (NULL);
return NULL;
}
trail = extent_alloc(tsdn, arena);
@ -1278,7 +1275,7 @@ extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
return (trail);
return trail;
label_error_d:
extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
label_error_c:
@ -1286,19 +1283,19 @@ label_error_c:
label_error_b:
extent_dalloc(tsdn, arena, trail);
label_error_a:
return (NULL);
return NULL;
}
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
if (!maps_coalesce) {
return (true);
return true;
}
if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
return (true);
return true;
}
return (false);
return false;
}
#ifdef JEMALLOC_MAPS_COALESCE
@ -1307,7 +1304,7 @@ extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
assert(extent_hooks == &extent_hooks_default);
return (extent_merge_default_impl(addr_a, addr_b));
return extent_merge_default_impl(addr_a, addr_b);
}
#endif
@ -1322,7 +1319,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_hooks_assure_initialized(arena, r_extent_hooks);
if ((*r_extent_hooks)->merge == NULL) {
return (true);
return true;
}
if (*r_extent_hooks == &extent_hooks_default) {
@ -1337,7 +1334,7 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
}
if (err) {
return (true);
return true;
}
/*
@ -1372,19 +1369,19 @@ extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
extent_dalloc(tsdn, extent_arena_get(b), b);
return (false);
return false;
}
bool
extent_boot(void) {
if (rtree_new(&extents_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
LG_PAGE))) {
return (true);
return true;
}
if (have_dss) {
extent_dss_boot();
}
return (false);
return false;
}

View File

@ -32,10 +32,10 @@ static void *dss_max;
static void *
extent_dss_sbrk(intptr_t increment) {
#ifdef JEMALLOC_DSS
return (sbrk(increment));
return sbrk(increment);
#else
not_implemented();
return (NULL);
return NULL;
#endif
}
@ -44,10 +44,10 @@ extent_dss_prec_get(void) {
dss_prec_t ret;
if (!have_dss) {
return (dss_prec_disabled);
return dss_prec_disabled;
}
ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
return (ret);
return ret;
}
bool
@ -56,7 +56,7 @@ extent_dss_prec_set(dss_prec_t dss_prec) {
return (dss_prec != dss_prec_disabled);
}
atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
return (false);
return false;
}
static void *
@ -87,10 +87,10 @@ extent_dss_max_update(void *new_addr) {
}
/* Fixed new_addr can only be supported if it is at the edge of DSS. */
if (new_addr != NULL && max_cur != new_addr) {
return (NULL);
return NULL;
}
return (max_cur);
return max_cur;
}
void *
@ -107,12 +107,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* interpret a large allocation request as a negative increment.
*/
if ((intptr_t)size < 0) {
return (NULL);
return NULL;
}
gap = extent_alloc(tsdn, arena);
if (gap == NULL) {
return (NULL);
return NULL;
}
if (!atomic_read_u(&dss_exhausted)) {
@ -187,7 +187,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
memset(ret, 0, size);
}
}
return (ret);
return ret;
}
/*
* Failure, whether due to OOM or a race with a raw
@ -207,7 +207,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
label_oom:
extent_dalloc(tsdn, arena, gap);
return (NULL);
return NULL;
}
static bool
@ -220,7 +220,7 @@ bool
extent_in_dss(void *addr) {
cassert(have_dss);
return (extent_in_dss_helper(addr, atomic_read_p(&dss_max)));
return extent_in_dss_helper(addr, atomic_read_p(&dss_max));
}
bool
@ -231,7 +231,7 @@ extent_dss_mergeable(void *addr_a, void *addr_b) {
if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b <
(uintptr_t)dss_base) {
return (true);
return true;
}
max = atomic_read_p(&dss_max);

View File

@ -12,14 +12,14 @@ extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero,
alloc_size = size + alignment - PAGE;
/* Beware size_t wrap-around. */
if (alloc_size < size) {
return (NULL);
return NULL;
}
do {
void *pages;
size_t leadsize;
pages = pages_map(NULL, alloc_size, commit);
if (pages == NULL) {
return (NULL);
return NULL;
}
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
@ -28,7 +28,7 @@ extent_alloc_mmap_slow(size_t size, size_t alignment, bool *zero,
assert(ret != NULL);
*zero = true;
return (ret);
return ret;
}
void *
@ -54,18 +54,18 @@ extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
ret = pages_map(new_addr, size, commit);
if (ret == NULL || ret == new_addr) {
return (ret);
return ret;
}
assert(new_addr == NULL);
offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
if (offset != 0) {
pages_unmap(ret, size);
return (extent_alloc_mmap_slow(size, alignment, zero, commit));
return extent_alloc_mmap_slow(size, alignment, zero, commit);
}
assert(ret != NULL);
*zero = true;
return (ret);
return ret;
}
bool
@ -73,5 +73,5 @@ extent_dalloc_mmap(void *addr, size_t size) {
if (config_munmap) {
pages_unmap(addr, size);
}
return (!config_munmap);
return !config_munmap;
}

View File

@ -280,17 +280,17 @@ malloc_initialized(void) {
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void) {
if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
return (malloc_init_hard_a0());
return malloc_init_hard_a0();
}
return (false);
return false;
}
JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void) {
if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
return (true);
return true;
}
return (false);
return false;
}
/*
@ -301,11 +301,11 @@ malloc_init(void) {
static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
if (unlikely(malloc_init_a0())) {
return (NULL);
return NULL;
}
return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
is_internal, arena_get(TSDN_NULL, 0, true), true));
return iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
is_internal, arena_get(TSDN_NULL, 0, true), true);
}
static void
@ -315,7 +315,7 @@ a0idalloc(extent_t *extent, void *ptr, bool is_internal) {
void *
a0malloc(size_t size) {
return (a0ialloc(size, false, true));
return a0ialloc(size, false, true);
}
void
@ -335,7 +335,7 @@ bootstrap_malloc(size_t size) {
size = 1;
}
return (a0ialloc(size, false, false));
return a0ialloc(size, false, false);
}
void *
@ -348,7 +348,7 @@ bootstrap_calloc(size_t num, size_t size) {
num_size = 1;
}
return (a0ialloc(num_size, true, false));
return a0ialloc(num_size, true, false);
}
void
@ -377,7 +377,7 @@ narenas_total_inc(void) {
unsigned
narenas_total_get(void) {
return (atomic_read_u(&narenas_total));
return atomic_read_u(&narenas_total);
}
/* Create a new arena and insert it into the arenas array at index ind. */
@ -387,7 +387,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(ind <= narenas_total_get());
if (ind > MALLOCX_ARENA_MAX) {
return (NULL);
return NULL;
}
if (ind == narenas_total_get()) {
narenas_total_inc();
@ -400,13 +400,13 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
arena = arena_get(tsdn, ind, false);
if (arena != NULL) {
assert(ind < narenas_auto);
return (arena);
return arena;
}
/* Actually initialize the arena. */
arena = arena_new(tsdn, ind, extent_hooks);
arena_set(ind, arena);
return (arena);
return arena;
}
arena_t *
@ -416,7 +416,7 @@ arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
malloc_mutex_lock(tsdn, &arenas_lock);
arena = arena_init_locked(tsdn, ind, extent_hooks);
malloc_mutex_unlock(tsdn, &arenas_lock);
return (arena);
return arena;
}
static void
@ -534,7 +534,7 @@ label_return:
if (arenas_tdata_old != NULL) {
a0dalloc(arenas_tdata_old);
}
return (tdata);
return tdata;
}
/* Slow path, called only by arena_choose(). */
@ -612,7 +612,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
return (NULL);
return NULL;
}
if (!!j == internal) {
ret = arena;
@ -627,7 +627,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
arena_bind(tsd, 0, true);
}
return (ret);
return ret;
}
void
@ -714,10 +714,10 @@ static char *
secure_getenv(const char *name) {
# ifdef JEMALLOC_HAVE_ISSETUGID
if (issetugid() != 0) {
return (NULL);
return NULL;
}
# endif
return (getenv(name));
return getenv(name);
}
#endif
@ -785,10 +785,10 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
}
return (true);
return true;
default:
malloc_write("<jemalloc>: Malformed conf string\n");
return (true);
return true;
}
}
@ -821,7 +821,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
}
*opts_p = opts;
return (false);
return false;
}
static void
@ -1147,7 +1147,7 @@ malloc_init_hard_needed(void) {
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
return (false);
return false;
}
#ifdef JEMALLOC_THREADED_INIT
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
@ -1160,10 +1160,10 @@ malloc_init_hard_needed(void) {
spin_adaptive(&spinner);
malloc_mutex_lock(TSDN_NULL, &init_lock);
} while (!malloc_initialized());
return (false);
return false;
}
#endif
return (true);
return true;
}
static bool
@ -1185,23 +1185,23 @@ malloc_init_hard_a0_locked() {
}
pages_boot();
if (base_boot(TSDN_NULL)) {
return (true);
return true;
}
if (extent_boot()) {
return (true);
return true;
}
if (ctl_boot()) {
return (true);
return true;
}
if (config_prof) {
prof_boot1();
}
arena_boot();
if (config_tcache && tcache_boot(TSDN_NULL)) {
return (true);
return true;
}
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) {
return (true);
return true;
}
/*
* Create enough scaffolding to allow recursive allocation in
@ -1217,12 +1217,12 @@ malloc_init_hard_a0_locked() {
*/
if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
== NULL) {
return (true);
return true;
}
malloc_init_state = malloc_init_a0_initialized;
return (false);
return false;
}
static bool
@ -1232,7 +1232,7 @@ malloc_init_hard_a0(void) {
malloc_mutex_lock(TSDN_NULL, &init_lock);
ret = malloc_init_hard_a0_locked();
malloc_mutex_unlock(TSDN_NULL, &init_lock);
return (ret);
return ret;
}
/* Initialize data structures which may trigger recursive allocation. */
@ -1252,17 +1252,17 @@ malloc_init_hard_recursible(void) {
if (opt_abort) {
abort();
}
return (true);
return true;
}
#endif
return (false);
return false;
}
static bool
malloc_init_hard_finish(tsdn_t *tsdn) {
if (malloc_mutex_boot()) {
return (true);
return true;
}
if (opt_narenas == 0) {
@ -1291,7 +1291,7 @@ malloc_init_hard_finish(tsdn_t *tsdn) {
arenas = (arena_t **)base_alloc(tsdn, a0->base, sizeof(arena_t *) *
(MALLOCX_ARENA_MAX+1), CACHELINE);
if (arenas == NULL) {
return (true);
return true;
}
/* Copy the pointer to the one arena that was already initialized. */
arena_set(0, a0);
@ -1299,7 +1299,7 @@ malloc_init_hard_finish(tsdn_t *tsdn) {
malloc_init_state = malloc_init_initialized;
malloc_slow_flag_init();
return (false);
return false;
}
static bool
@ -1312,39 +1312,39 @@ malloc_init_hard(void) {
malloc_mutex_lock(TSDN_NULL, &init_lock);
if (!malloc_init_hard_needed()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock);
return (false);
return false;
}
if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) {
malloc_mutex_unlock(TSDN_NULL, &init_lock);
return (true);
return true;
}
malloc_mutex_unlock(TSDN_NULL, &init_lock);
/* Recursive allocation relies on functional tsd. */
tsd = malloc_tsd_boot0();
if (tsd == NULL) {
return (true);
return true;
}
if (malloc_init_hard_recursible()) {
return (true);
return true;
}
malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
if (config_prof && prof_boot2(tsd)) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
return (true);
return true;
}
if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
return (true);
return true;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
malloc_tsd_boot1();
return (false);
return false;
}
/*
@ -1679,8 +1679,6 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts) {
witness_assert_lockless(tsd_tsdn(tsd));
/* Success! */
*dopts->result = allocation;
return 0;
@ -1829,7 +1827,7 @@ je_aligned_alloc(size_t alignment, size_t size) {
dopts.alignment = alignment;
imalloc(&sopts, &dopts);
return (ret);
return ret;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@ -1864,13 +1862,13 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
void *p;
if (tctx == NULL) {
return (NULL);
return NULL;
}
if (usize <= SMALL_MAXCLASS) {
p = iralloc(tsd, extent, old_ptr, old_usize, LARGE_MINCLASS, 0,
false);
if (p == NULL) {
return (NULL);
return NULL;
}
arena_prof_promote(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), p), p,
usize);
@ -1878,7 +1876,7 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
}
return (p);
return p;
}
JEMALLOC_ALWAYS_INLINE_C void *
@ -1901,13 +1899,13 @@ irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
}
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
return NULL;
}
extent = (p == old_ptr) ? old_extent : iealloc(tsd_tsdn(tsd), p);
prof_realloc(tsd, extent, p, usize, tctx, prof_active, true, old_extent,
old_ptr, old_usize, old_tctx);
return (p);
return p;
}
JEMALLOC_INLINE_C void
@ -1977,7 +1975,7 @@ je_realloc(void *ptr, size_t size) {
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
ifree(tsd, ptr, tcache_get(tsd, false), true);
return (NULL);
return NULL;
}
size = 1;
}
@ -2029,7 +2027,7 @@ je_realloc(void *ptr, size_t size) {
}
UTRACE(ptr, size, ret);
witness_assert_lockless(tsdn);
return (ret);
return ret;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
@ -2113,7 +2111,7 @@ je_valloc(size_t size) {
imalloc(&sopts, &dopts);
return (ret);
return ret;
}
#endif
@ -2226,13 +2224,13 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
void *p;
if (tctx == NULL) {
return (NULL);
return NULL;
}
if (usize <= SMALL_MAXCLASS) {
p = iralloct(tsdn, extent, old_ptr, old_usize, LARGE_MINCLASS,
alignment, zero, tcache, arena);
if (p == NULL) {
return (NULL);
return NULL;
}
arena_prof_promote(tsdn, iealloc(tsdn, p), p, usize);
} else {
@ -2240,7 +2238,7 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
zero, tcache, arena);
}
return (p);
return p;
}
JEMALLOC_ALWAYS_INLINE_C void *
@ -2264,7 +2262,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
}
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, false);
return (NULL);
return NULL;
}
if (p == old_ptr && alignment != 0) {
@ -2284,7 +2282,7 @@ irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
prof_realloc(tsd, extent, p, *usize, tctx, prof_active, false,
old_extent, old_ptr, old_usize, old_tctx);
return (p);
return p;
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@ -2359,7 +2357,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
}
UTRACE(ptr, size, p);
witness_assert_lockless(tsd_tsdn(tsd));
return (p);
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
@ -2367,7 +2365,7 @@ label_oom:
}
UTRACE(ptr, size, 0);
witness_assert_lockless(tsd_tsdn(tsd));
return (NULL);
return NULL;
}
JEMALLOC_ALWAYS_INLINE_C size_t
@ -2377,11 +2375,11 @@ ixallocx_helper(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t old_usize,
if (ixalloc(tsdn, extent, ptr, old_usize, size, extra, alignment,
zero)) {
return (old_usize);
return old_usize;
}
usize = isalloc(tsdn, extent, ptr);
return (usize);
return usize;
}
static size_t
@ -2391,12 +2389,12 @@ ixallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *ptr,
size_t usize;
if (tctx == NULL) {
return (old_usize);
return old_usize;
}
usize = ixallocx_helper(tsdn, extent, ptr, old_usize, size, extra,
alignment, zero);
return (usize);
return usize;
}
JEMALLOC_ALWAYS_INLINE_C size_t
@ -2440,12 +2438,12 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
}
if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false);
return (usize);
return usize;
}
prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, extent,
ptr, old_usize, old_tctx);
return (usize);
return usize;
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@ -2501,7 +2499,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
label_not_resized:
UTRACE(ptr, size, ptr);
witness_assert_lockless(tsd_tsdn(tsd));
return (usize);
return usize;
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@ -2522,7 +2520,7 @@ je_sallocx(const void *ptr, int flags) {
}
witness_assert_lockless(tsdn);
return (usize);
return usize;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
@ -2566,7 +2564,7 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) {
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
}
witness_assert_lockless(tsdn);
return (usize);
return usize;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
@ -2612,7 +2610,7 @@ je_nallocx(size_t size, int flags) {
assert(size != 0);
if (unlikely(malloc_init())) {
return (0);
return 0;
}
tsdn = tsdn_fetch();
@ -2620,11 +2618,11 @@ je_nallocx(size_t size, int flags) {
usize = inallocx(tsdn, size, flags);
if (unlikely(usize > LARGE_MAXCLASS)) {
return (0);
return 0;
}
witness_assert_lockless(tsdn);
return (usize);
return usize;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
@ -2634,14 +2632,14 @@ je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
tsd_t *tsd;
if (unlikely(malloc_init())) {
return (EAGAIN);
return EAGAIN;
}
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
return ret;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
@ -2650,14 +2648,14 @@ je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
tsdn_t *tsdn;
if (unlikely(malloc_init())) {
return (EAGAIN);
return EAGAIN;
}
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
ret = ctl_nametomib(tsdn, name, mibp, miblenp);
witness_assert_lockless(tsdn);
return (ret);
return ret;
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
@ -2667,14 +2665,14 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
tsd_t *tsd;
if (unlikely(malloc_init())) {
return (EAGAIN);
return EAGAIN;
}
tsd = tsd_fetch();
witness_assert_lockless(tsd_tsdn(tsd));
ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
return ret;
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
@ -2706,7 +2704,7 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
}
witness_assert_lockless(tsdn);
return (ret);
return ret;
}
/*

View File

@ -36,7 +36,7 @@ void *
newImpl(std::size_t size) noexcept(IsNoExcept) {
void *ptr = je_malloc(size);
if (likely(ptr != nullptr))
return (ptr);
return ptr;
while (ptr == nullptr) {
std::new_handler handler;
@ -62,27 +62,27 @@ newImpl(std::size_t size) noexcept(IsNoExcept) {
if (ptr == nullptr && !IsNoExcept)
std::__throw_bad_alloc();
return (ptr);
return ptr;
}
void *
operator new(std::size_t size) {
return (newImpl<false>(size));
return newImpl<false>(size);
}
void *
operator new[](std::size_t size) {
return (newImpl<false>(size));
return newImpl<false>(size);
}
void *
operator new(std::size_t size, const std::nothrow_t &) noexcept {
return (newImpl<true>(size));
return newImpl<true>(size);
}
void *
operator new[](std::size_t size, const std::nothrow_t &) noexcept {
return (newImpl<true>(size));
return newImpl<true>(size);
}
void

View File

@ -7,7 +7,7 @@ void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
assert(usize == s2u(usize));
return (large_palloc(tsdn, arena, usize, CACHELINE, zero));
return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
void *
@ -22,7 +22,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
return (NULL);
return NULL;
}
/*
@ -35,7 +35,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
}
if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
arena, usize, alignment, &is_zeroed)) == NULL) {
return (NULL);
return NULL;
}
/* Insert extent into large. */
@ -58,7 +58,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
}
arena_decay_tick(tsdn, arena);
return (extent_addr_get(extent));
return extent_addr_get(extent);
}
#ifdef JEMALLOC_JET
@ -108,7 +108,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
assert(oldusize > usize);
if (extent_hooks->split == NULL) {
return (true);
return true;
}
/* Split excess pages. */
@ -117,7 +117,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
&extent_hooks, extent, usize + large_pad, usize, diff,
diff);
if (trail == NULL) {
return (true);
return true;
}
if (config_fill && unlikely(opt_junk_free)) {
@ -130,7 +130,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
return (false);
return false;
}
static bool
@ -144,7 +144,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
extent_t *trail;
if (extent_hooks->merge == NULL) {
return (true);
return true;
}
if ((trail = arena_extent_cache_alloc(tsdn, arena, &extent_hooks,
@ -154,13 +154,13 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE,
&is_zeroed_trail, &commit, false)) == NULL) {
return (true);
return true;
}
}
if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
return (true);
return true;
}
if (zero || (config_fill && unlikely(opt_zero))) {
@ -191,7 +191,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
return (false);
return false;
}
bool
@ -209,7 +209,7 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
zero)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min >
@ -217,7 +217,7 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
large_ralloc_no_move_expand(tsdn, extent, usize_min,
zero)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
return false;
}
}
@ -228,26 +228,26 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (extent_usize_get(extent) >= usize_min && extent_usize_get(extent) <=
usize_max) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
return false;
}
/* Attempt to shrink the allocation in-place. */
if (extent_usize_get(extent) > usize_max) {
if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
arena_decay_tick(tsdn, extent_arena_get(extent));
return (false);
return false;
}
}
return (true);
return true;
}
static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero) {
if (alignment <= CACHELINE) {
return (large_malloc(tsdn, arena, usize, zero));
return large_malloc(tsdn, arena, usize, zero);
}
return (large_palloc(tsdn, arena, usize, alignment, zero));
return large_palloc(tsdn, arena, usize, alignment, zero);
}
void *
@ -264,7 +264,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
return (extent_addr_get(extent));
return extent_addr_get(extent);
}
/*
@ -274,7 +274,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
*/
ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, zero);
if (ret == NULL) {
return (NULL);
return NULL;
}
copysize = (usize < extent_usize_get(extent)) ? usize :
@ -282,7 +282,7 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
memcpy(ret, extent_addr_get(extent), copysize);
isdalloct(tsdn, extent, extent_addr_get(extent),
extent_usize_get(extent), tcache, true);
return (ret);
return ret;
}
/*
@ -321,12 +321,12 @@ large_dalloc(tsdn_t *tsdn, extent_t *extent) {
size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
return (extent_usize_get(extent));
return extent_usize_get(extent);
}
prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
return (extent_prof_tctx_get(extent));
return extent_prof_tctx_get(extent);
}
void

View File

@ -54,7 +54,7 @@ pthread_create(pthread_t *__restrict thread,
pthread_once(&once_control, pthread_create_once);
return (pthread_create_fptr(thread, attr, start_routine, arg));
return pthread_create_fptr(thread, attr, start_routine, arg);
}
#endif
@ -74,7 +74,7 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
_CRT_SPINCOUNT)) {
return (true);
return true;
}
# endif
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
@ -88,26 +88,26 @@ malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
} else {
if (_pthread_mutex_init_calloc_cb(&mutex->lock,
bootstrap_calloc) != 0) {
return (true);
return true;
}
}
#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0) {
return (true);
return true;
}
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
return true;
}
pthread_mutexattr_destroy(&attr);
#endif
if (config_debug) {
witness_init(&mutex->witness, name, rank, NULL, NULL);
}
return (false);
return false;
}
void
@ -143,10 +143,10 @@ malloc_mutex_boot(void) {
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
bootstrap_calloc) != 0) {
return (true);
return true;
}
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
return (false);
return false;
}

View File

@ -14,17 +14,17 @@ nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
uint64_t
nstime_ns(const nstime_t *time) {
return (time->ns);
return time->ns;
}
uint64_t
nstime_sec(const nstime_t *time) {
return (time->ns / BILLION);
return time->ns / BILLION;
}
uint64_t
nstime_nsec(const nstime_t *time) {
return (time->ns % BILLION);
return time->ns % BILLION;
}
void
@ -34,7 +34,7 @@ nstime_copy(nstime_t *time, const nstime_t *source) {
int
nstime_compare(const nstime_t *a, const nstime_t *b) {
return ((a->ns > b->ns) - (a->ns < b->ns));
return (a->ns > b->ns) - (a->ns < b->ns);
}
void
@ -70,7 +70,7 @@ uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor) {
assert(divisor->ns != 0);
return (time->ns / divisor->ns);
return time->ns / divisor->ns;
}
#ifdef _WIN32
@ -126,7 +126,7 @@ nstime_get(nstime_t *time) {
#endif
bool
nstime_monotonic(void) {
return (NSTIME_MONOTONIC);
return NSTIME_MONOTONIC;
#undef NSTIME_MONOTONIC
}
#ifdef JEMALLOC_JET
@ -149,10 +149,10 @@ nstime_update(nstime_t *time) {
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
return (true);
return true;
}
return (false);
return false;
}
#ifdef JEMALLOC_JET
#undef nstime_update

View File

@ -58,7 +58,7 @@ pages_map(void *addr, size_t size, bool *commit) {
#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
return ret;
}
void
@ -98,12 +98,12 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
pages_unmap(addr, alloc_size);
new_addr = pages_map(ret, size, commit);
if (new_addr == ret) {
return (ret);
return ret;
}
if (new_addr) {
pages_unmap(new_addr, size);
}
return (NULL);
return NULL;
}
#else
{
@ -115,7 +115,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
if (trailsize != 0) {
pages_unmap((void *)((uintptr_t)ret + size), trailsize);
}
return (ret);
return ret;
}
#endif
}
@ -123,7 +123,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
static bool
pages_commit_impl(void *addr, size_t size, bool commit) {
if (os_overcommits) {
return (true);
return true;
}
#ifdef _WIN32
@ -135,7 +135,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
-1, 0);
if (result == MAP_FAILED) {
return (true);
return true;
}
if (result != addr) {
/*
@ -143,27 +143,27 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
* place.
*/
pages_unmap(result, size);
return (true);
return true;
}
return (false);
return false;
}
#endif
}
bool
pages_commit(void *addr, size_t size) {
return (pages_commit_impl(addr, size, true));
return pages_commit_impl(addr, size, true);
}
bool
pages_decommit(void *addr, size_t size) {
return (pages_commit_impl(addr, size, false));
return pages_commit_impl(addr, size, false);
}
bool
pages_purge_lazy(void *addr, size_t size) {
if (!pages_can_purge_lazy) {
return (true);
return true;
}
#ifdef _WIN32
@ -173,13 +173,13 @@ pages_purge_lazy(void *addr, size_t size) {
#else
not_reached();
#endif
return (false);
return false;
}
bool
pages_purge_forced(void *addr, size_t size) {
if (!pages_can_purge_forced) {
return (true);
return true;
}
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
@ -197,7 +197,7 @@ pages_huge(void *addr, size_t size) {
#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
return (true);
return true;
#endif
}
@ -209,7 +209,7 @@ pages_nohuge(void *addr, size_t size) {
#ifdef JEMALLOC_THP
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
return (false);
return false;
#endif
}
@ -221,7 +221,7 @@ os_overcommits_sysctl(void) {
sz = sizeof(vm_overcommit);
if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) {
return (false); /* Error. */
return false; /* Error. */
}
return ((vm_overcommit & 0x3) == 0);
@ -246,7 +246,7 @@ os_overcommits_proc(void) {
fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
#endif
if (fd == -1) {
return (false); /* Error. */
return false; /* Error. */
}
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
@ -262,7 +262,7 @@ os_overcommits_proc(void) {
#endif
if (nread < 1) {
return (false); /* Error. */
return false; /* Error. */
}
/*
* /proc/sys/vm/overcommit_memory meanings:

View File

@ -149,7 +149,7 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
b_tctx_uid);
}
}
return (ret);
return ret;
}
rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
@ -164,7 +164,7 @@ prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
if (ret == 0) {
ret = (a_len > b_len) - (a_len < b_len);
}
return (ret);
return ret;
}
rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
@ -183,7 +183,7 @@ prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
}
return (ret);
return ret;
}
rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
@ -319,7 +319,7 @@ static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
cassert(config_prof);
return (_URC_NO_REASON);
return _URC_NO_REASON;
}
static _Unwind_Reason_Code
@ -331,15 +331,15 @@ prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
ip = (void *)_Unwind_GetIP(context);
if (ip == NULL) {
return (_URC_END_OF_STACK);
return _URC_END_OF_STACK;
}
data->bt->vec[data->bt->len] = ip;
data->bt->len++;
if (data->bt->len == data->max) {
return (_URC_END_OF_STACK);
return _URC_END_OF_STACK;
}
return (_URC_NO_REASON);
return _URC_NO_REASON;
}
void
@ -525,12 +525,12 @@ static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}
static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid) {
return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
}
static prof_gctx_t *
@ -543,7 +543,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
if (gctx == NULL) {
return (NULL);
return NULL;
}
gctx->lock = prof_gctx_mutex_choose();
/*
@ -556,7 +556,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
gctx->bt.vec = gctx->vec;
gctx->bt.len = bt->len;
return (gctx);
return gctx;
}
static void
@ -600,29 +600,29 @@ prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
if (opt_prof_accum) {
return (false);
return false;
}
if (tctx->cnts.curobjs != 0) {
return (false);
return false;
}
if (tctx->prepared) {
return (false);
return false;
}
return (true);
return true;
}
static bool
prof_gctx_should_destroy(prof_gctx_t *gctx) {
if (opt_prof_accum) {
return (false);
return false;
}
if (!tctx_tree_empty(&gctx->tctxs)) {
return (false);
return false;
}
if (gctx->nlimbo != 0) {
return (false);
return false;
}
return (true);
return true;
}
static void
@ -721,7 +721,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
if (gctx.v == NULL) {
prof_leave(tsd, tdata);
return (true);
return true;
}
btkey.p = &gctx.p->bt;
if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
@ -729,7 +729,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_leave(tsd, tdata);
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), gctx.v),
gctx.v, NULL, true, true);
return (true);
return true;
}
new_gctx = true;
} else {
@ -747,7 +747,7 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
*p_btkey = btkey.v;
*p_gctx = gctx.p;
*p_new_gctx = new_gctx;
return (false);
return false;
}
prof_tctx_t *
@ -763,7 +763,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) {
return (NULL);
return NULL;
}
malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
@ -783,7 +783,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
*/
if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
&new_gctx)) {
return (NULL);
return NULL;
}
/* Link a prof_tctx_t into gctx for this thread. */
@ -794,7 +794,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
if (new_gctx) {
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
}
return (NULL);
return NULL;
}
ret.p->tdata = tdata;
ret.p->thr_uid = tdata->thr_uid;
@ -813,7 +813,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
}
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), ret.v),
ret.v, NULL, true, true);
return (NULL);
return NULL;
}
malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
ret.p->state = prof_tctx_state_nominal;
@ -822,7 +822,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
}
return (ret.p);
return ret.p;
}
/*
@ -887,7 +887,7 @@ prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
(*tdata_count)++;
return (NULL);
return NULL;
}
size_t
@ -901,7 +901,7 @@ prof_tdata_count(void) {
(void *)&tdata_count);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
return (tdata_count);
return tdata_count;
}
#endif
@ -915,14 +915,14 @@ prof_bt_count(void) {
tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) {
return (0);
return 0;
}
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
return (bt_count);
return bt_count;
}
#endif
@ -943,7 +943,7 @@ prof_dump_open(bool propagate_err, const char *filename) {
}
}
return (fd);
return fd;
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
@ -971,7 +971,7 @@ prof_dump_flush(bool propagate_err) {
}
prof_dump_buf_end = 0;
return (ret);
return ret;
}
static bool
@ -983,7 +983,7 @@ prof_dump_close(bool propagate_err) {
close(prof_dump_fd);
prof_dump_fd = -1;
return (ret);
return ret;
}
static bool
@ -998,7 +998,7 @@ prof_dump_write(bool propagate_err, const char *s) {
/* Flush the buffer if it is full. */
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
if (prof_dump_flush(propagate_err) && propagate_err) {
return (true);
return true;
}
}
@ -1014,7 +1014,7 @@ prof_dump_write(bool propagate_err, const char *s) {
i += n;
}
return (false);
return false;
}
JEMALLOC_FORMAT_PRINTF(2, 3)
@ -1029,7 +1029,7 @@ prof_dump_printf(bool propagate_err, const char *format, ...) {
va_end(ap);
ret = prof_dump_write(propagate_err, buf);
return (ret);
return ret;
}
static void
@ -1093,7 +1093,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
not_reached();
}
return (NULL);
return NULL;
}
struct prof_tctx_dump_iter_arg_s {
@ -1120,13 +1120,13 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
tctx->dump_cnts.accumbytes)) {
return (tctx);
return tctx;
}
break;
default:
not_reached();
}
return (NULL);
return NULL;
}
static prof_tctx_t *
@ -1152,7 +1152,7 @@ prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
ret = NULL;
label_return:
return (ret);
return ret;
}
static void
@ -1192,7 +1192,7 @@ prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
}
malloc_mutex_unlock(arg->tsdn, gctx->lock);
return (NULL);
return NULL;
}
static void
@ -1279,7 +1279,7 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
}
malloc_mutex_unlock(arg->tsdn, tdata->lock);
return (NULL);
return NULL;
}
static prof_tdata_t *
@ -1288,7 +1288,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
bool propagate_err = *(bool *)arg;
if (!tdata->dumping) {
return (NULL);
return NULL;
}
if (prof_dump_printf(propagate_err,
@ -1298,9 +1298,9 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
tdata->cnt_summed.accumbytes,
(tdata->thread_name != NULL) ? " " : "",
(tdata->thread_name != NULL) ? tdata->thread_name : "")) {
return (tdata);
return tdata;
}
return (NULL);
return NULL;
}
#ifdef JEMALLOC_JET
@ -1316,14 +1316,14 @@ prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) {
" t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
return (true);
return true;
}
malloc_mutex_lock(tsdn, &tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL);
malloc_mutex_unlock(tsdn, &tdatas_mtx);
return (ret);
return ret;
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
@ -1383,7 +1383,7 @@ prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
ret = false;
label_return:
return (ret);
return ret;
}
#ifndef _WIN32
@ -1399,16 +1399,16 @@ prof_open_maps(const char *format, ...) {
va_end(ap);
mfd = open(filename, O_RDONLY);
return (mfd);
return mfd;
}
#endif
static int
prof_getpid(void) {
#ifdef _WIN32
return (GetCurrentProcessId());
return GetCurrentProcessId();
#else
return (getpid());
return getpid();
#endif
}
@ -1464,7 +1464,7 @@ label_return:
if (mfd != -1) {
close(mfd);
}
return (ret);
return ret;
}
/*
@ -1524,7 +1524,7 @@ prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
ret = NULL;
label_return:
malloc_mutex_unlock(arg->tsdn, gctx->lock);
return (ret);
return ret;
}
static void
@ -1773,13 +1773,13 @@ prof_mdump(tsd_t *tsd, const char *filename) {
cassert(config_prof);
if (!opt_prof || !prof_booted) {
return (true);
return true;
}
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0') {
return (true);
return true;
}
malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
@ -1787,7 +1787,7 @@ prof_mdump(tsd_t *tsd, const char *filename) {
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(tsd, true, filename, false));
return prof_dump(tsd, true, filename, false);
}
void
@ -1837,7 +1837,7 @@ prof_bt_keycomp(const void *k1, const void *k2) {
cassert(config_prof);
if (bt1->len != bt2->len) {
return (false);
return false;
}
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
@ -1851,7 +1851,7 @@ prof_thr_uid_alloc(tsdn_t *tsdn) {
next_thr_uid++;
malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
return (thr_uid);
return thr_uid;
}
static prof_tdata_t *
@ -1866,7 +1866,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
size2index(sizeof(prof_tdata_t)), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (tdata == NULL) {
return (NULL);
return NULL;
}
tdata->lock = prof_tdata_mutex_choose(thr_uid);
@ -1881,7 +1881,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
prof_bt_keycomp)) {
idalloctm(tsd_tsdn(tsd), iealloc(tsd_tsdn(tsd), tdata), tdata,
NULL, true, true);
return (NULL);
return NULL;
}
tdata->prng_state = (uint64_t)(uintptr_t)tdata;
@ -1898,24 +1898,24 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata_tree_insert(&tdatas, tdata);
malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
return (tdata);
return tdata;
}
prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}
static bool
prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
if (tdata->attached && !even_if_attached) {
return (false);
return false;
}
if (ckh_count(&tdata->bt2tctx) != 0) {
return (false);
return false;
}
return (true);
return true;
}
static bool
@ -1923,7 +1923,7 @@ prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached) {
malloc_mutex_assert_owner(tsdn, tdata->lock);
return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
}
static void
@ -1985,8 +1985,8 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
active));
return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
active);
}
static bool
@ -2003,7 +2003,7 @@ prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
}
malloc_mutex_unlock(tsdn, tdata->lock);
return (destroy_tdata);
return destroy_tdata;
}
static prof_tdata_t *
@ -2062,7 +2062,7 @@ prof_active_get(tsdn_t *tsdn) {
malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_current = prof_active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
return (prof_active_current);
return prof_active_current;
}
bool
@ -2073,7 +2073,7 @@ prof_active_set(tsdn_t *tsdn, bool active) {
prof_active_old = prof_active;
prof_active = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
return (prof_active_old);
return prof_active_old;
}
const char *
@ -2082,7 +2082,7 @@ prof_thread_name_get(tsd_t *tsd) {
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return ("");
return "";
}
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
@ -2093,21 +2093,21 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
size_t size;
if (thread_name == NULL) {
return (NULL);
return NULL;
}
size = strlen(thread_name) + 1;
if (size == 1) {
return ("");
return "";
}
ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (ret == NULL) {
return (NULL);
return NULL;
}
memcpy(ret, thread_name, size);
return (ret);
return ret;
}
int
@ -2118,23 +2118,23 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return (EAGAIN);
return EAGAIN;
}
/* Validate input. */
if (thread_name == NULL) {
return (EFAULT);
return EFAULT;
}
for (i = 0; thread_name[i] != '\0'; i++) {
char c = thread_name[i];
if (!isgraph(c) && !isblank(c)) {
return (EFAULT);
return EFAULT;
}
}
s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
if (s == NULL) {
return (EAGAIN);
return EAGAIN;
}
if (tdata->thread_name != NULL) {
@ -2145,7 +2145,7 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
if (strlen(s) > 0) {
tdata->thread_name = s;
}
return (0);
return 0;
}
bool
@ -2154,9 +2154,9 @@ prof_thread_active_get(tsd_t *tsd) {
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return (false);
return false;
}
return (tdata->active);
return tdata->active;
}
bool
@ -2165,10 +2165,10 @@ prof_thread_active_set(tsd_t *tsd, bool active) {
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return (true);
return true;
}
tdata->active = active;
return (false);
return false;
}
bool
@ -2178,7 +2178,7 @@ prof_thread_active_init_get(tsdn_t *tsdn) {
malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
return (active_init);
return active_init;
}
bool
@ -2189,7 +2189,7 @@ prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
return (active_init_old);
return active_init_old;
}
bool
@ -2199,7 +2199,7 @@ prof_gdump_get(tsdn_t *tsdn) {
malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
return (prof_gdump_current);
return prof_gdump_current;
}
bool
@ -2210,7 +2210,7 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) {
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
return (prof_gdump_old);
return prof_gdump_old;
}
void
@ -2257,50 +2257,50 @@ prof_boot2(tsd_t *tsd) {
prof_active = opt_prof_active;
if (malloc_mutex_init(&prof_active_mtx, "prof_active",
WITNESS_RANK_PROF_ACTIVE)) {
return (true);
return true;
}
prof_gdump_val = opt_prof_gdump;
if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
WITNESS_RANK_PROF_GDUMP)) {
return (true);
return true;
}
prof_thread_active_init = opt_prof_thread_active_init;
if (malloc_mutex_init(&prof_thread_active_init_mtx,
"prof_thread_active_init",
WITNESS_RANK_PROF_THREAD_ACTIVE_INIT)) {
return (true);
return true;
}
if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp)) {
return (true);
return true;
}
if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
WITNESS_RANK_PROF_BT2GCTX)) {
return (true);
return true;
}
tdata_tree_new(&tdatas);
if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
WITNESS_RANK_PROF_TDATAS)) {
return (true);
return true;
}
next_thr_uid = 0;
if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
WITNESS_RANK_PROF_NEXT_THR_UID)) {
return (true);
return true;
}
if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
WITNESS_RANK_PROF_DUMP_SEQ)) {
return (true);
return true;
}
if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
WITNESS_RANK_PROF_DUMP)) {
return (true);
return true;
}
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
@ -2315,12 +2315,12 @@ prof_boot2(tsd_t *tsd) {
b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
CACHELINE);
if (gctx_locks == NULL) {
return (true);
return true;
}
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
WITNESS_RANK_PROF_GCTX)) {
return (true);
return true;
}
}
@ -2328,12 +2328,12 @@ prof_boot2(tsd_t *tsd) {
b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
CACHELINE);
if (tdata_locks == NULL) {
return (true);
return true;
}
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
WITNESS_RANK_PROF_TDATA)) {
return (true);
return true;
}
}
}
@ -2348,7 +2348,7 @@ prof_boot2(tsd_t *tsd) {
prof_booted = true;
return (false);
return false;
}
void

View File

@ -60,7 +60,7 @@ rtree_new(rtree_t *rtree, unsigned bits) {
malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE);
return (false);
return false;
}
#ifdef JEMALLOC_JET
@ -69,8 +69,8 @@ rtree_new(rtree_t *rtree, unsigned bits) {
#endif
static rtree_elm_t *
rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return ((rtree_elm_t *)base_alloc(tsdn, b0get(), nelms *
sizeof(rtree_elm_t), CACHELINE));
return (rtree_elm_t *)base_alloc(tsdn, b0get(), nelms *
sizeof(rtree_elm_t), CACHELINE);
}
#ifdef JEMALLOC_JET
#undef rtree_node_alloc
@ -137,25 +137,25 @@ rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
rtree->levels[level].bits);
if (node == NULL) {
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return (NULL);
return NULL;
}
atomic_write_p((void **)elmp, node);
}
malloc_mutex_unlock(tsdn, &rtree->init_lock);
return (node);
return node;
}
rtree_elm_t *
rtree_subtree_read_hard(tsdn_t *tsdn, rtree_t *rtree, unsigned level) {
return (rtree_node_init(tsdn, rtree, level,
&rtree->levels[level].subtree));
return rtree_node_init(tsdn, rtree, level,
&rtree->levels[level].subtree);
}
rtree_elm_t *
rtree_child_read_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm,
unsigned level) {
return (rtree_node_init(tsdn, rtree, level+1, &elm->child));
return rtree_node_init(tsdn, rtree, level+1, &elm->child);
}
static int
@ -167,7 +167,7 @@ rtree_elm_witness_comp(const witness_t *a, void *oa, const witness_t *b,
assert(ka != 0);
assert(kb != 0);
return ((ka > kb) - (ka < kb));
return (ka > kb) - (ka < kb);
}
static witness_t *
@ -192,7 +192,7 @@ rtree_elm_witness_alloc(tsd_t *tsd, uintptr_t key, const rtree_elm_t *elm) {
}
}
assert(witness != NULL);
return (witness);
return witness;
}
static witness_t *
@ -205,7 +205,7 @@ rtree_elm_witness_find(tsd_t *tsd, const rtree_elm_t *elm) {
rtree_elm_witness_t *rew = &witnesses->witnesses[i];
if (rew->elm == elm) {
return (&rew->witness);
return &rew->witness;
}
}
not_reached();

View File

@ -25,7 +25,7 @@ static tcaches_t *tcaches_avail;
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
return (arena_salloc(tsdn, iealloc(tsdn, ptr), ptr));
return arena_salloc(tsdn, iealloc(tsdn, ptr), ptr);
}
void
@ -82,7 +82,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
}
ret = tcache_alloc_easy(tbin, tcache_success);
return (ret);
return ret;
}
void
@ -297,13 +297,13 @@ tcache_get_hard(tsd_t *tsd) {
if (tsd_nominal(tsd)) {
tcache_enabled_set(false); /* Memoize. */
}
return (NULL);
return NULL;
}
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL)) {
return (NULL);
return NULL;
}
return (tcache_create(tsd_tsdn(tsd), arena));
return tcache_create(tsd_tsdn(tsd), arena);
}
tcache_t *
@ -323,7 +323,7 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) {
tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
if (tcache == NULL) {
return (NULL);
return NULL;
}
tcache_arena_associate(tsdn, tcache, arena);
@ -343,7 +343,7 @@ tcache_create(tsdn_t *tsdn, arena_t *arena) {
(uintptr_t)stack_offset);
}
return (tcache);
return tcache;
}
static void
@ -432,20 +432,20 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
* (MALLOCX_TCACHE_MAX+1), CACHELINE);
if (tcaches == NULL) {
return (true);
return true;
}
}
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
return (true);
return true;
}
arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL)) {
return (true);
return true;
}
tcache = tcache_create(tsd_tsdn(tsd), arena);
if (tcache == NULL) {
return (true);
return true;
}
if (tcaches_avail != NULL) {
@ -460,7 +460,7 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
tcaches_past++;
}
return (false);
return false;
}
static void
@ -503,7 +503,7 @@ tcache_boot(tsdn_t *tsdn) {
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
* sizeof(tcache_bin_info_t), CACHELINE);
if (tcache_bin_info == NULL) {
return (true);
return true;
}
stack_nelms = 0;
for (i = 0; i < NBINS; i++) {
@ -525,5 +525,5 @@ tcache_boot(tsdn_t *tsdn) {
stack_nelms += tcache_bin_info[i].ncached_max;
}
return (false);
return false;
}

View File

@ -13,7 +13,7 @@ malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
void *
malloc_tsd_malloc(size_t size) {
return (a0malloc(CACHELINE_CEILING(size)));
return a0malloc(CACHELINE_CEILING(size));
}
void
@ -109,11 +109,11 @@ malloc_tsd_boot0(void) {
ncleanups = 0;
if (tsd_boot0()) {
return (NULL);
return NULL;
}
tsd = tsd_fetch();
*tsd_arenas_tdata_bypassp_get(tsd) = true;
return (tsd);
return tsd;
}
void
@ -137,7 +137,7 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
default:
break;
}
return (true);
return true;
}
#ifdef _MSC_VER
@ -167,7 +167,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return (iter->data);
return iter->data;
}
}
/* Insert block into list. */
@ -175,7 +175,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
malloc_mutex_unlock(TSDN_NULL, &head->lock);
return (NULL);
return NULL;
}
void

View File

@ -87,16 +87,16 @@ buferror(int err, char *buf, size_t buflen) {
#ifdef _WIN32
FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
(LPSTR)buf, (DWORD)buflen, NULL);
return (0);
return 0;
#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
return 0;
#else
return (strerror_r(err, buf, buflen));
return strerror_r(err, buf, buflen);
#endif
}
@ -218,7 +218,7 @@ label_return:
*endptr = (char *)p;
}
}
return (ret);
return ret;
}
static char *
@ -260,7 +260,7 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) {
}}
*slen_p = U2S_BUFSIZE - 1 - i;
return (&s[i]);
return &s[i];
}
static char *
@ -288,7 +288,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
break;
default: not_reached();
}
return (s);
return s;
}
static char *
@ -299,7 +299,7 @@ o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) {
(*slen_p)++;
*s = '0';
}
return (s);
return s;
}
static char *
@ -310,7 +310,7 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
(*slen_p) += 2;
memcpy(s, uppercase ? "0X" : "0x", 2);
}
return (s);
return s;
}
size_t
@ -593,7 +593,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
return (i);
return i;
}
JEMALLOC_FORMAT_PRINTF(3, 4)
@ -606,7 +606,7 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) {
ret = malloc_vsnprintf(str, size, format, ap);
va_end(ap);
return (ret);
return ret;
}
void

View File

@ -135,17 +135,17 @@ zone_size(malloc_zone_t *zone, const void *ptr) {
* not work in practice, we must check all pointers to assure that they
* reside within a mapped extent before determining size.
*/
return (ivsalloc(tsdn_fetch(), ptr));
return ivsalloc(tsdn_fetch(), ptr);
}
static void *
zone_malloc(malloc_zone_t *zone, size_t size) {
return (je_malloc(size));
return je_malloc(size);
}
static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
return (je_calloc(num, size));
return je_calloc(num, size);
}
static void *
@ -154,7 +154,7 @@ zone_valloc(malloc_zone_t *zone, size_t size) {
je_posix_memalign(&ret, PAGE, size);
return (ret);
return ret;
}
static void
@ -170,10 +170,10 @@ zone_free(malloc_zone_t *zone, void *ptr) {
static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (ivsalloc(tsdn_fetch(), ptr) != 0) {
return (je_realloc(ptr, size));
return je_realloc(ptr, size);
}
return (realloc(ptr, size));
return realloc(ptr, size);
}
static void *
@ -182,7 +182,7 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
je_posix_memalign(&ret, alignment, size);
return (ret);
return ret;
}
static void
@ -240,7 +240,7 @@ zone_good_size(malloc_zone_t *zone, size_t size) {
if (size == 0) {
size = 1;
}
return (s2u(size));
return s2u(size);
}
static kern_return_t
@ -368,10 +368,10 @@ zone_default_get(void) {
}
if (num_zones) {
return (zones[0]);
return zones[0];
}
return (malloc_default_zone());
return malloc_default_zone();
}
/* As written, this function can only promote jemalloc_zone. */

View File

@ -26,5 +26,5 @@ btalloc_##n(size_t size, unsigned bits) { \
} \
/* Intentionally sabotage tail call optimization. */ \
assert_ptr_not_null(p, "Unexpected mallocx() failure"); \
return (p); \
return p; \
}

View File

@ -86,12 +86,12 @@ extent_alloc_hook(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
"Wrong hook function");
called_alloc = true;
if (!try_alloc) {
return (NULL);
return NULL;
}
ret = default_hooks->alloc(default_hooks, new_addr, size, alignment,
zero, commit, 0);
did_alloc = (ret != NULL);
return (ret);
return ret;
}
static bool
@ -108,11 +108,11 @@ extent_dalloc_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_dalloc = true;
if (!try_dalloc) {
return (true);
return true;
}
err = default_hooks->dalloc(default_hooks, addr, size, committed, 0);
did_dalloc = !err;
return (err);
return err;
}
static bool
@ -129,12 +129,12 @@ extent_commit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_commit = true;
if (!try_commit) {
return (true);
return true;
}
err = default_hooks->commit(default_hooks, addr, size, offset, length,
0);
did_commit = !err;
return (err);
return err;
}
static bool
@ -151,12 +151,12 @@ extent_decommit_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_decommit = true;
if (!try_decommit) {
return (true);
return true;
}
err = default_hooks->decommit(default_hooks, addr, size, offset, length,
0);
did_decommit = !err;
return (err);
return err;
}
static bool
@ -173,13 +173,13 @@ extent_purge_lazy_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_purge_lazy = true;
if (!try_purge_lazy) {
return (true);
return true;
}
err = default_hooks->purge_lazy == NULL ||
default_hooks->purge_lazy(default_hooks, addr, size, offset, length,
0);
did_purge_lazy = !err;
return (err);
return err;
}
static bool
@ -196,13 +196,13 @@ extent_purge_forced_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_purge_forced = true;
if (!try_purge_forced) {
return (true);
return true;
}
err = default_hooks->purge_forced == NULL ||
default_hooks->purge_forced(default_hooks, addr, size, offset,
length, 0);
did_purge_forced = !err;
return (err);
return err;
}
static bool
@ -220,13 +220,13 @@ extent_split_hook(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_split = true;
if (!try_split) {
return (true);
return true;
}
err = (default_hooks->split == NULL ||
default_hooks->split(default_hooks, addr, size, size_a, size_b,
committed, 0));
did_split = !err;
return (err);
return err;
}
static bool
@ -244,13 +244,13 @@ extent_merge_hook(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
"Wrong hook function");
called_merge = true;
if (!try_merge) {
return (true);
return true;
}
err = (default_hooks->merge == NULL ||
default_hooks->merge(default_hooks, addr_a, size_a, addr_b, size_b,
committed, 0));
did_merge = !err;
return (err);
return err;
}
static void

View File

@ -36,9 +36,9 @@ ln_gamma(double x) {
z = 1.0 / (x * x);
return (f + (x-0.5) * log(x) - x + 0.918938533204673 +
return f + (x-0.5) * log(x) - x + 0.918938533204673 +
(((-0.000595238095238 * z + 0.000793650793651) * z -
0.002777777777778) * z + 0.083333333333333) / x);
0.002777777777778) * z + 0.083333333333333) / x;
}
/*
@ -60,7 +60,7 @@ i_gamma(double x, double p, double ln_gamma_p) {
assert(x >= 0.0);
if (x == 0.0) {
return (0.0);
return 0.0;
}
acu = 1.0e-10;
@ -80,7 +80,7 @@ i_gamma(double x, double p, double ln_gamma_p) {
gin += term;
if (term <= acu) {
gin *= factor / p;
return (gin);
return gin;
}
}
} else {
@ -107,7 +107,7 @@ i_gamma(double x, double p, double ln_gamma_p) {
dif = fabs(gin - rn);
if (dif <= acu && dif <= acu * rn) {
gin = 1.0 - factor * gin;
return (gin);
return gin;
}
gin = rn;
}
@ -144,7 +144,7 @@ pt_norm(double p) {
if (fabs(q) <= 0.425) {
/* p close to 1/2. */
r = 0.180625 - q * q;
return (q * (((((((2.5090809287301226727e3 * r +
return q * (((((((2.5090809287301226727e3 * r +
3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r
+ 4.5921953931549871457e4) * r + 1.3731693765509461125e4) *
r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2)
@ -153,7 +153,7 @@ pt_norm(double p) {
2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r
+ 2.1213794301586595867e4) * r + 5.3941960214247511077e3) *
r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1)
* r + 1.0));
* r + 1.0);
} else {
if (q < 0.0) {
r = p;
@ -204,7 +204,7 @@ pt_norm(double p) {
if (q < 0.0) {
ret = -ret;
}
return (ret);
return ret;
}
}
@ -240,7 +240,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2) {
/* Starting approximation for small Chi^2. */
ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx);
if (ch - e < 0.0) {
return (ch);
return ch;
}
} else {
if (df > 0.32) {
@ -279,7 +279,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2) {
q = ch;
p1 = 0.5 * ch;
if (p1 < 0.0) {
return (-1.0);
return -1.0;
}
p2 = p - i_gamma(p1, xx, ln_gamma_df_2);
t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch));
@ -301,7 +301,7 @@ pt_chi2(double p, double df, double ln_gamma_df_2) {
}
}
return (ch);
return ch;
}
/*
@ -311,6 +311,6 @@ pt_chi2(double p, double df, double ln_gamma_df_2) {
*/
JEMALLOC_INLINE double
pt_gamma(double p, double shape, double scale, double ln_gamma_shape) {
return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale);
return pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale;
}
#endif

View File

@ -38,11 +38,11 @@ a_attr bool \
a_prefix##init(a_mq_type *mq) { \
\
if (mtx_init(&mq->lock)) { \
return (true); \
return true; \
} \
ql_new(&mq->msgs); \
mq->count = 0; \
return (false); \
return false; \
} \
a_attr void \
a_prefix##fini(a_mq_type *mq) { \
@ -55,7 +55,7 @@ a_prefix##count(a_mq_type *mq) { \
mtx_lock(&mq->lock); \
count = mq->count; \
mtx_unlock(&mq->lock); \
return (count); \
return count; \
} \
a_attr a_mq_msg_type * \
a_prefix##tryget(a_mq_type *mq) { \
@ -68,7 +68,7 @@ a_prefix##tryget(a_mq_type *mq) { \
mq->count--; \
} \
mtx_unlock(&mq->lock); \
return (msg); \
return msg; \
} \
a_attr a_mq_msg_type * \
a_prefix##get(a_mq_type *mq) { \
@ -77,7 +77,7 @@ a_prefix##get(a_mq_type *mq) { \
\
msg = a_prefix##tryget(mq); \
if (msg != NULL) { \
return (msg); \
return msg; \
} \
\
ns = 1; \
@ -85,7 +85,7 @@ a_prefix##get(a_mq_type *mq) { \
mq_nanosleep(ns); \
msg = a_prefix##tryget(mq); \
if (msg != NULL) { \
return (msg); \
return msg; \
} \
if (ns < 1000*1000*1000) { \
/* Double sleep time, up to max 1 second. */ \

View File

@ -41,7 +41,7 @@ thd_start(void *arg) {
assert_ptr_not_null(p, "Unexpected mallocx() error");
dallocx(p, 0);
return (NULL);
return NULL;
}
TEST_BEGIN(test_MALLOCX_ARENA) {
@ -61,6 +61,6 @@ TEST_END
int
main(void) {
return (test(
test_MALLOCX_ARENA));
return test(
test_MALLOCX_ARENA);
}

View File

@ -126,8 +126,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_alignment_errors,
test_oom_errors,
test_alignment_and_size));
test_alignment_and_size);
}

View File

@ -91,12 +91,12 @@ thd_start(void *arg) {
"Deallocated memory counter should increase by at least the amount "
"explicitly deallocated");
return (NULL);
return NULL;
label_ENOENT:
assert_false(config_stats,
"ENOENT should only be returned if stats are disabled");
test_skip("\"thread.allocated\" mallctl not available");
return (NULL);
return NULL;
}
TEST_BEGIN(test_main_thread) {
@ -115,10 +115,10 @@ TEST_END
int
main(void) {
/* Run tests multiple times to check for bad interactions. */
return (test(
return test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
test_main_thread);
}

View File

@ -20,6 +20,6 @@ TEST_END
int
main() {
return (test(
test_basic));
return test(
test_basic);
}

View File

@ -174,7 +174,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_extent_manual_hook,
test_extent_auto_hook));
test_extent_auto_hook);
}

View File

@ -13,12 +13,12 @@ get_nsizes_impl(const char *cmd) {
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
return ret;
}
static unsigned
get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
return get_nsizes_impl("arenas.nlextents");
}
static size_t
@ -36,12 +36,12 @@ get_size_impl(const char *cmd, size_t ind) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
return ret;
}
static size_t
get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
return get_size_impl("arenas.lextent.0.size", ind);
}
/*
@ -216,9 +216,9 @@ TEST_END
int
main(void) {
return (test(
return test(
test_overflow,
test_oom,
test_basic,
test_alignment_and_size));
test_alignment_and_size);
}

View File

@ -41,6 +41,6 @@ TEST_END
int
main(void) {
return (test(
test_overflow));
return test(
test_overflow);
}

View File

@ -120,8 +120,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_alignment_errors,
test_oom_errors,
test_alignment_and_size));
test_alignment_and_size);
}

View File

@ -9,12 +9,12 @@ get_nsizes_impl(const char *cmd) {
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
return ret;
}
static unsigned
get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
return get_nsizes_impl("arenas.nlextents");
}
static size_t
@ -32,12 +32,12 @@ get_size_impl(const char *cmd, size_t ind) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
return ret;
}
static size_t
get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
return get_size_impl("arenas.lextent.0.size", ind);
}
TEST_BEGIN(test_grow_and_shrink) {
@ -100,7 +100,7 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
}
}
return (ret);
return ret;
}
TEST_BEGIN(test_zero) {
@ -236,10 +236,10 @@ TEST_END
int
main(void) {
return (test(
return test(
test_grow_and_shrink,
test_zero,
test_align,
test_lg_align_and_zero,
test_overflow));
test_overflow);
}


@ -49,7 +49,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_basic,
test_alignment_and_size));
test_alignment_and_size);
}


@ -34,7 +34,7 @@ thd_start(void *arg) {
assert_u_eq(arena_ind, main_arena_ind,
"Arena index should be same as for main thread");
return (NULL);
return NULL;
}
TEST_BEGIN(test_thread_arena) {
@ -72,6 +72,6 @@ TEST_END
int
main(void) {
return (test(
test_thread_arena));
return test(
test_thread_arena);
}


@ -77,10 +77,10 @@ thd_start(void *arg) {
assert_false(e0, "tcache should be disabled");
free(malloc(1));
return (NULL);
return NULL;
label_ENOENT:
test_skip("\"thread.tcache.enabled\" mallctl not available");
return (NULL);
return NULL;
}
TEST_BEGIN(test_main_thread) {
@ -99,10 +99,10 @@ TEST_END
int
main(void) {
/* Run tests multiple times to check for bad interactions. */
return (test(
return test(
test_main_thread,
test_subthread,
test_main_thread,
test_subthread,
test_main_thread));
test_main_thread);
}


@ -19,7 +19,7 @@ arena_ind(void) {
0), 0, "Unexpected mallctl failure creating arena");
}
return (ind);
return ind;
}
TEST_BEGIN(test_same_size) {
@ -76,17 +76,17 @@ get_nsizes_impl(const char *cmd) {
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
return ret;
}
static unsigned
get_nsmall(void) {
return (get_nsizes_impl("arenas.nbins"));
return get_nsizes_impl("arenas.nbins");
}
static unsigned
get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
return get_nsizes_impl("arenas.nlextents");
}
static size_t
@ -104,17 +104,17 @@ get_size_impl(const char *cmd, size_t ind) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
return ret;
}
static size_t
get_small_size(size_t ind) {
return (get_size_impl("arenas.bin.0.size", ind));
return get_size_impl("arenas.bin.0.size", ind);
}
static size_t
get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
return get_size_impl("arenas.lextent.0.size", ind);
}
TEST_BEGIN(test_size) {
@ -312,7 +312,7 @@ validate_fill(const void *p, uint8_t c, size_t offset, size_t len) {
print_filled_extents(p, c, offset + len);
}
return (err);
return err;
}
static void
@ -376,7 +376,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_same_size,
test_extra_no_move,
test_no_move_fail,
@ -384,5 +384,5 @@ main(void) {
test_size_extra_overflow,
test_extra_small,
test_extra_large,
test_zero_large));
test_zero_large);
}


@ -2,5 +2,5 @@
void *
btalloc(size_t size, unsigned bits) {
return (btalloc_0(size, bits));
return btalloc_0(size, bits);
}


@ -9,7 +9,7 @@ mtx_init(mtx_t *mtx) {
#ifdef _WIN32
if (!InitializeCriticalSectionAndSpinCount(&mtx->lock,
_CRT_SPINCOUNT)) {
return (true);
return true;
}
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
mtx->lock = OS_UNFAIR_LOCK_INIT;
@ -19,16 +19,16 @@ mtx_init(mtx_t *mtx) {
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0) {
return (true);
return true;
}
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
if (pthread_mutex_init(&mtx->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
return true;
}
pthread_mutexattr_destroy(&attr);
#endif
return (false);
return false;
}
void


@ -65,7 +65,7 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap) {
*/
if (nallocx(1, 0) == 0) {
malloc_printf("Initialization error");
return (test_status_fail);
return test_status_fail;
}
}
@ -85,7 +85,7 @@ p_test_impl(bool do_malloc_init, test_t *t, va_list ap) {
test_status_string(test_status_fail),
test_counts[test_status_fail], test_count);
return (ret);
return ret;
}
test_status_t
@ -98,7 +98,7 @@ p_test(test_t *t, ...) {
ret = p_test_impl(true, t, ap);
va_end(ap);
return (ret);
return ret;
}
test_status_t
@ -111,7 +111,7 @@ p_test_no_malloc_init(test_t *t, ...) {
ret = p_test_impl(false, t, ap);
va_end(ap);
return (ret);
return ret;
}
void


@ -18,7 +18,7 @@ timer_usec(const timedelta_t *timer) {
nstime_copy(&delta, &timer->t1);
nstime_subtract(&delta, &timer->t0);
return (nstime_ns(&delta) / 1000);
return nstime_ns(&delta) / 1000;
}
void


@ -156,10 +156,10 @@ TEST_END
int
main(void) {
return (test(
return test(
test_malloc_vs_mallocx,
test_free_vs_dallocx,
test_dallocx_vs_sdallocx,
test_mus_vs_sallocx,
test_sallocx_vs_nallocx));
test_sallocx_vs_nallocx);
}


@ -1591,9 +1591,9 @@ TEST_END
int
main(void) {
return (test(
return test(
test_gen_rand_32,
test_by_array_32,
test_gen_rand_64,
test_by_array_64));
test_by_array_64);
}


@ -11,6 +11,6 @@ TEST_END
int
main(void) {
return (test_no_malloc_init(
test_a0));
return test_no_malloc_init(
test_a0);
}


@ -13,17 +13,17 @@ get_nsizes_impl(const char *cmd) {
assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0,
"Unexpected mallctl(\"%s\", ...) failure", cmd);
return (ret);
return ret;
}
static unsigned
get_nsmall(void) {
return (get_nsizes_impl("arenas.nbins"));
return get_nsizes_impl("arenas.nbins");
}
static unsigned
get_nlarge(void) {
return (get_nsizes_impl("arenas.nlextents"));
return get_nsizes_impl("arenas.nlextents");
}
static size_t
@ -41,17 +41,17 @@ get_size_impl(const char *cmd, size_t ind) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0),
0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
return (ret);
return ret;
}
static size_t
get_small_size(size_t ind) {
return (get_size_impl("arenas.bin.0.size", ind));
return get_size_impl("arenas.bin.0.size", ind);
}
static size_t
get_large_size(size_t ind) {
return (get_size_impl("arenas.lextent.0.size", ind));
return get_size_impl("arenas.lextent.0.size", ind);
}
/* Like ivsalloc(), but safe to call on discarded allocations. */
@ -61,13 +61,13 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
extent = extent_lookup(tsdn, ptr, false);
if (extent == NULL) {
return (0);
return 0;
}
if (!extent_active_get(extent)) {
return (0);
return 0;
}
return (isalloc(tsdn, extent, ptr));
return isalloc(tsdn, extent, ptr);
}
static unsigned
@ -77,7 +77,7 @@ do_arena_create(extent_hooks_t *h) {
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
(void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
"Unexpected mallctl() failure");
return (arena_ind);
return arena_ind;
}
static void
@ -190,7 +190,7 @@ arena_i_initialized(unsigned arena_ind, bool refresh) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&initialized, &sz, NULL,
0), 0, "Unexpected mallctlbymib() failure");
return (initialized);
return initialized;
}
TEST_BEGIN(test_arena_destroy_initial) {
@ -255,11 +255,11 @@ extent_dalloc_unmap(extent_hooks_t *extent_hooks, void *addr, size_t size,
"Wrong hook function");
called_dalloc = true;
if (!try_dalloc) {
return (true);
return true;
}
pages_unmap(addr, size);
did_dalloc = true;
return (false);
return false;
}
static extent_hooks_t hooks_orig;
@ -313,9 +313,9 @@ TEST_END
int
main(void) {
return (test(
return test(
test_arena_reset,
test_arena_destroy_initial,
test_arena_destroy_hooks_default,
test_arena_destroy_hooks_unmap));
test_arena_destroy_hooks_unmap);
}


@ -101,10 +101,10 @@ TEST_END
int
main(void) {
return (test(
return test(
test_atomic_u64,
test_atomic_u32,
test_atomic_p,
test_atomic_zu,
test_atomic_u));
test_atomic_u);
}


@ -212,8 +212,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_base_hooks_default,
test_base_hooks_null,
test_base_hooks_not_null));
test_base_hooks_not_null);
}


@ -143,7 +143,7 @@ test_bitmap_size_body(const bitmap_info_t *binfo, size_t nbits,
assert_zu_ge(size, (nbits >> 3),
"Bitmap size is smaller than expected");
assert_zu_ge(size, prev_size, "Bitmap size is smaller than expected");
return (size);
return size;
}
TEST_BEGIN(test_bitmap_size) {
@ -329,11 +329,11 @@ TEST_END
int
main(void) {
return (test(
return test(
test_bitmap_initializer,
test_bitmap_size,
test_bitmap_init,
test_bitmap_set,
test_bitmap_unset,
test_bitmap_sfu));
test_bitmap_sfu);
}


@ -204,8 +204,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_new_delete,
test_count_insert_search_remove,
test_insert_iter_remove));
test_insert_iter_remove);
}


@ -11,7 +11,7 @@ static bool monotonic_mock;
static bool
nstime_monotonic_mock(void) {
return (monotonic_mock);
return monotonic_mock;
}
static bool
@ -20,7 +20,7 @@ nstime_update_mock(nstime_t *time) {
if (monotonic_mock) {
nstime_copy(time, &time_mock);
}
return (!monotonic_mock);
return !monotonic_mock;
}
TEST_BEGIN(test_decay_ticks) {
@ -352,8 +352,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_decay_ticks,
test_decay_ticker,
test_decay_nonmonotonic));
test_decay_nonmonotonic);
}


@ -134,8 +134,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_small_extent_size,
test_large_extent_size,
test_monotonic));
test_monotonic);
}


@ -57,6 +57,6 @@ TEST_END
int
main(void) {
return (test(
test_fork));
return test(
test_fork);
}


@ -38,9 +38,9 @@ typedef enum {
static int
hash_variant_bits(hash_variant_t variant) {
switch (variant) {
case hash_variant_x86_32: return (32);
case hash_variant_x86_128: return (128);
case hash_variant_x64_128: return (128);
case hash_variant_x86_32: return 32;
case hash_variant_x86_128: return 128;
case hash_variant_x64_128: return 128;
default: not_reached();
}
}
@ -48,9 +48,9 @@ hash_variant_bits(hash_variant_t variant) {
static const char *
hash_variant_string(hash_variant_t variant) {
switch (variant) {
case hash_variant_x86_32: return ("hash_x86_32");
case hash_variant_x86_128: return ("hash_x86_128");
case hash_variant_x64_128: return ("hash_x64_128");
case hash_variant_x86_32: return "hash_x86_32";
case hash_variant_x86_128: return "hash_x86_128";
case hash_variant_x64_128: return "hash_x64_128";
default: not_reached();
}
}
@ -165,8 +165,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_hash_x86_32,
test_hash_x86_128,
test_hash_x64_128));
test_hash_x64_128);
}


@ -138,7 +138,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_junk_small,
test_junk_large));
test_junk_large);
}


@ -621,7 +621,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_mallctl_errors,
test_mallctlnametomib_errors,
test_mallctlbymib_errors,
@ -643,5 +643,5 @@ main(void) {
test_arenas_bin_constants,
test_arenas_lextent_constants,
test_arenas_create,
test_stats_arenas));
test_stats_arenas);
}


@ -18,7 +18,7 @@ double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) {
double rel_err;
if (fabs(a - b) < max_abs_err) {
return (true);
return true;
}
rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a);
return (rel_err < max_rel_err);
@ -33,7 +33,7 @@ factorial(unsigned x) {
ret *= (uint64_t)i;
}
return (ret);
return ret;
}
TEST_BEGIN(test_ln_gamma_factorial) {
@ -380,11 +380,11 @@ TEST_END
int
main(void) {
return (test(
return test(
test_ln_gamma_factorial,
test_ln_gamma_misc,
test_pt_norm,
test_pt_chi2,
test_pt_gamma_shape,
test_pt_gamma_scale));
test_pt_gamma_scale);
}


@ -39,7 +39,7 @@ thd_receiver_start(void *arg) {
assert_ptr_not_null(msg, "mq_get() should never return NULL");
dallocx(msg, 0);
}
return (NULL);
return NULL;
}
static void *
@ -55,7 +55,7 @@ thd_sender_start(void *arg) {
msg = (mq_msg_t *)p;
mq_put(mq, msg);
}
return (NULL);
return NULL;
}
TEST_BEGIN(test_mq_threaded) {
@ -82,8 +82,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_mq_basic,
test_mq_threaded));
test_mq_threaded);
}


@ -28,7 +28,7 @@ thd_start(void *varg) {
arg->x++;
mtx_unlock(&arg->mtx);
}
return (NULL);
return NULL;
}
TEST_BEGIN(test_mtx_race) {
@ -51,7 +51,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_mtx_basic,
test_mtx_race));
test_mtx_race);
}


@ -198,7 +198,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_nstime_init,
test_nstime_init2,
test_nstime_copy,
@ -209,5 +209,5 @@ main(void) {
test_nstime_idivide,
test_nstime_divide,
test_nstime_monotonic,
test_nstime_update));
test_nstime_update);
}


@ -41,12 +41,12 @@ binind_compute(void) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
if (size == SZ) {
return (i);
return i;
}
}
test_fail("Unable to compute nregs_per_run");
return (0);
return 0;
}
static size_t
@ -63,7 +63,7 @@ nregs_per_run_compute(void) {
sz = sizeof(nregs);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
0), 0, "Unexpected mallctlbymib failure");
return (nregs);
return nregs;
}
static unsigned
@ -75,7 +75,7 @@ arenas_create_mallctl(void) {
assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
0, "Error in arenas.create");
return (arena_ind);
return arena_ind;
}
static void
@ -158,6 +158,6 @@ TEST_END
int
main(void) {
return (test(
test_pack));
return test(
test_pack);
}


@ -22,6 +22,6 @@ TEST_END
int
main(void) {
return (test(
test_pages_huge));
return test(
test_pages_huge);
}


@ -22,7 +22,7 @@ node_cmp(const node_t *a, const node_t *b) {
ret = (((uintptr_t)a) > ((uintptr_t)b))
- (((uintptr_t)a) < ((uintptr_t)b));
}
return (ret);
return ret;
}
static int
@ -31,7 +31,7 @@ node_cmp_magic(const node_t *a, const node_t *b) {
assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic");
assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic");
return (node_cmp(a, b));
return node_cmp(a, b);
}
typedef ph(node_t) heap_t;
@ -94,7 +94,7 @@ node_validate(const node_t *node, const node_t *parent) {
leftmost_child = phn_lchild_get(node_t, link, node);
if (leftmost_child == NULL) {
return (nnodes);
return nnodes;
}
assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child),
(void *)node, "Leftmost child does not link to node");
@ -107,7 +107,7 @@ node_validate(const node_t *node, const node_t *parent) {
"sibling's prev doesn't link to sibling");
nnodes += node_validate(sibling, node);
}
return (nnodes);
return nnodes;
}
static unsigned
@ -133,7 +133,7 @@ label_return:
if (false) {
heap_print(heap);
}
return (nnodes);
return nnodes;
}
TEST_BEGIN(test_ph_empty) {
@ -156,7 +156,7 @@ static node_t *
node_remove_first(heap_t *heap) {
node_t *node = heap_remove_first(heap);
node->magic = 0;
return (node);
return node;
}
TEST_BEGIN(test_ph_random) {
@ -281,7 +281,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_ph_empty,
test_ph_random));
test_ph_random);
}


@ -221,7 +221,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_prng_lg_range_u32_nonatomic,
test_prng_lg_range_u32_atomic,
test_prng_lg_range_u64_nonatomic,
@ -231,5 +231,5 @@ main(void) {
test_prng_range_u32_atomic,
test_prng_range_u64_nonatomic,
test_prng_range_zu_nonatomic,
test_prng_range_zu_atomic));
test_prng_range_zu_atomic);
}


@ -17,12 +17,12 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
return fd;
}
static void *
alloc_from_permuted_backtrace(unsigned thd_ind, unsigned iteration) {
return (btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration));
return btalloc(1, thd_ind*NALLOCS_PER_THREAD + iteration);
}
static void *
@ -51,7 +51,7 @@ thd_start(void *varg) {
}
}
return (NULL);
return NULL;
}
TEST_BEGIN(test_idump) {
@ -81,6 +81,6 @@ TEST_END
int
main(void) {
return (test(
test_idump));
return test(
test_idump);
}


@ -117,6 +117,6 @@ TEST_END
int
main(void) {
return (test(
test_prof_active));
return test(
test_prof_active);
}


@ -15,7 +15,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
return fd;
}
TEST_BEGIN(test_gdump) {
@ -73,6 +73,6 @@ TEST_END
int
main(void) {
return (test(
test_gdump));
return test(
test_gdump);
}


@ -24,7 +24,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
return fd;
}
TEST_BEGIN(test_idump) {
@ -50,6 +50,6 @@ TEST_END
int
main(void) {
return (test(
test_idump));
return test(
test_idump);
}


@ -12,7 +12,7 @@ prof_dump_open_intercept(bool propagate_err, const char *filename) {
fd = open("/dev/null", O_WRONLY);
assert_d_ne(fd, -1, "Unexpected open() failure");
return (fd);
return fd;
}
static void
@ -29,7 +29,7 @@ get_lg_prof_sample(void) {
assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz,
NULL, 0), 0,
"Unexpected mallctl failure while reading profiling sample rate");
return (lg_prof_sample);
return lg_prof_sample;
}
static void
@ -94,7 +94,7 @@ prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err,
prof_dump_header_intercepted = true;
memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));
return (false);
return false;
}
TEST_BEGIN(test_prof_reset_cleanup) {
@ -181,7 +181,7 @@ thd_start(void *varg) {
}
}
return (NULL);
return NULL;
}
TEST_BEGIN(test_prof_reset) {
@ -283,9 +283,9 @@ main(void) {
/* Intercept dumping prior to running any tests. */
prof_dump_open = prof_dump_open_intercept;
return (test(
return test(
test_prof_reset_basic,
test_prof_reset_cleanup,
test_prof_reset,
test_xallocx));
test_xallocx);
}


@ -94,7 +94,7 @@ thd_start(void *varg) {
mallctl_thread_name_set(thread_name);
mallctl_thread_name_set("");
return (NULL);
return NULL;
}
TEST_BEGIN(test_prof_thread_name_threaded) {
@ -118,7 +118,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_prof_thread_name_validation,
test_prof_thread_name_threaded));
test_prof_thread_name_threaded);
}


@ -192,11 +192,11 @@ TEST_END
int
main(void) {
return (test(
return test(
test_ql_empty,
test_ql_tail_insert,
test_ql_tail_remove,
test_ql_head_insert,
test_ql_head_remove,
test_ql_insert));
test_ql_insert);
}


@ -232,10 +232,10 @@ TEST_END
int
main(void) {
return (test(
return test(
test_qr_one,
test_qr_after_insert,
test_qr_remove,
test_qr_before_insert,
test_qr_meld_split));
test_qr_meld_split);
}


@ -36,7 +36,7 @@ node_cmp(const node_t *a, const node_t *b) {
ret = (((uintptr_t)a) > ((uintptr_t)b))
- (((uintptr_t)a) < ((uintptr_t)b));
}
return (ret);
return ret;
}
typedef rb_tree(node_t) tree_t;
@ -73,7 +73,7 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
node_t *right_node;
if (node == NULL) {
return (ret);
return ret;
}
left_node = rbtn_left_get(node_t, link, node);
@ -112,7 +112,7 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) {
ret += (black_depth != black_height);
}
return (ret);
return ret;
}
static node_t *
@ -139,7 +139,7 @@ tree_iterate_cb(tree_t *tree, node_t *node, void *data) {
(*i)++;
return (NULL);
return NULL;
}
static unsigned
@ -149,7 +149,7 @@ tree_iterate(tree_t *tree) {
i = 0;
tree_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
return i;
}
static unsigned
@ -159,7 +159,7 @@ tree_iterate_reverse(tree_t *tree) {
i = 0;
tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i);
return (i);
return i;
}
static void
@ -201,7 +201,7 @@ remove_iterate_cb(tree_t *tree, node_t *node, void *data) {
node_remove(tree, node, *nnodes);
return (ret);
return ret;
}
static node_t *
@ -211,7 +211,7 @@ remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) {
node_remove(tree, node, *nnodes);
return (ret);
return ret;
}
static void
@ -347,7 +347,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_rb_empty,
test_rb_random));
test_rb_random);
}


@ -18,7 +18,7 @@ rtree_node_alloc_intercept(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
assert_ptr_not_null(node, "Unexpected calloc() failure");
malloc_mutex_lock(tsdn, &rtree->init_lock);
return (node);
return node;
}
static void
@ -102,7 +102,7 @@ thd_start(void *varg) {
free(extent);
fini_gen_rand(sfmt);
return (NULL);
return NULL;
}
TEST_BEGIN(test_rtree_concurrent) {
@ -283,10 +283,10 @@ main(void) {
rtree_node_dalloc = rtree_node_dalloc_intercept;
test_rtree = NULL;
return (test(
return test(
test_rtree_read_empty,
test_rtree_concurrent,
test_rtree_extrema,
test_rtree_bits,
test_rtree_random));
test_rtree_random);
}


@ -19,7 +19,7 @@ get_max_size_class(void) {
assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz,
NULL, 0), 0, "Unexpected mallctlbymib() error");
return (max_size_class);
return max_size_class;
}
TEST_BEGIN(test_size_classes) {
@ -173,8 +173,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_size_classes,
test_psize_classes,
test_overflow));
test_overflow);
}


@ -27,6 +27,6 @@ TEST_END
int
main(void) {
return (test(
test_arena_slab_regind));
return test(
test_arena_slab_regind);
}


@ -95,8 +95,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_smoothstep_integral,
test_smoothstep_monotonic,
test_smoothstep_slope));
test_smoothstep_slope);
}


@ -116,7 +116,7 @@ TEST_END
void *
thd_start(void *arg) {
return (NULL);
return NULL;
}
static void
@ -339,12 +339,12 @@ TEST_END
int
main(void) {
return (test(
return test(
test_stats_summary,
test_stats_large,
test_stats_arenas_summary,
test_stats_arenas_small,
test_stats_arenas_large,
test_stats_arenas_bins,
test_stats_arenas_lextents));
test_stats_arenas_lextents);
}


@ -983,7 +983,7 @@ TEST_END
int
main(void) {
return (test(
return test(
test_json_parser,
test_stats_print_json));
test_stats_print_json);
}


@ -64,8 +64,8 @@ TEST_END
int
main(void) {
return (test(
return test(
test_ticker_tick,
test_ticker_ticks,
test_ticker_copy));
test_ticker_copy);
}

Some files were not shown because too many files have changed in this diff.