Remove leading blank lines from function bodies.

This resolves #535.
Jason Evans
2017-01-13 10:35:35 -08:00
parent 87e81e609b
commit ffbb7dac3d
103 changed files with 0 additions and 611 deletions
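As an illustration of the change (the pre-change form here is reconstructed from the first hunk below; the line deleted in each hunk is the single blank line after the opening brace), a function body that formerly opened with a blank line:

JEMALLOC_INLINE unsigned
arena_ind_get(const arena_t *arena)
{

	return (base_ind_get(arena->base));
}

now begins directly with its first statement:

JEMALLOC_INLINE unsigned
arena_ind_get(const arena_t *arena)
{
	return (base_ind_get(arena->base));
}

The same rule applies to line-continued macro bodies (the pairing-heap and TSD macros below), where the removed line is a lone "\" continuation after the opening brace:

a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
	\
	return (ph->ph_root == NULL); \
} \

becomes

a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
	return (ph->ph_root == NULL); \
} \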


@@ -16,35 +16,30 @@ bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
JEMALLOC_INLINE unsigned
arena_ind_get(const arena_t *arena)
{
return (base_ind_get(arena->base));
}
JEMALLOC_INLINE void
arena_internal_add(arena_t *arena, size_t size)
{
atomic_add_zu(&arena->stats.internal, size);
}
JEMALLOC_INLINE void
arena_internal_sub(arena_t *arena, size_t size)
{
atomic_sub_zu(&arena->stats.internal, size);
}
JEMALLOC_INLINE size_t
arena_internal_get(arena_t *arena)
{
return (atomic_read_zu(&arena->stats.internal));
}
JEMALLOC_INLINE bool
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
assert(prof_interval != 0);
@@ -59,7 +54,6 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
JEMALLOC_INLINE bool
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (likely(prof_interval == 0))
@@ -70,7 +64,6 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
JEMALLOC_INLINE bool
arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
if (likely(prof_interval == 0))


@@ -33,7 +33,6 @@ arena_bin_index(arena_t *arena, arena_bin_t *bin)
JEMALLOC_INLINE prof_tctx_t *
arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
@@ -46,7 +45,6 @@ JEMALLOC_INLINE void
arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
@@ -58,7 +56,6 @@ JEMALLOC_INLINE void
arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(!extent_slab_get(extent));
@@ -85,7 +82,6 @@ arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{
malloc_mutex_assert_not_owner(tsdn, &arena->lock);
arena_decay_ticks(tsdn, arena, 1);
@@ -95,7 +91,6 @@ JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path)
{
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
@@ -118,7 +113,6 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr)
{
return (extent_arena_get(iealloc(tsdn, ptr)));
}
@@ -142,7 +136,6 @@ JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
bool slow_path)
{
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@@ -176,7 +169,6 @@ JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
tcache_t *tcache, bool slow_path)
{
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);


@@ -101,7 +101,6 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
asm volatile (
"xchgq %1, %0;" /* Lock is implied by xchgq. */
: "=m" (*p), "+r" (x) /* Outputs. */
@@ -141,7 +140,6 @@ atomic_write_u64(uint64_t *p, uint64_t x)
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
/*
* atomic_fetchadd_64() doesn't exist, but we only ever use this
* function on LP64 systems, so atomic_fetchadd_long() will do.
@@ -154,7 +152,6 @@ atomic_add_u64(uint64_t *p, uint64_t x)
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
@@ -163,7 +160,6 @@ atomic_sub_u64(uint64_t *p, uint64_t x)
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
@@ -172,7 +168,6 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
assert(sizeof(uint64_t) == sizeof(unsigned long));
atomic_store_rel_long(p, x);
@@ -181,21 +176,18 @@ atomic_write_u64(uint64_t *p, uint64_t x)
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}
@@ -213,14 +205,12 @@ atomic_write_u64(uint64_t *p, uint64_t x)
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, x) + x);
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}
@@ -236,7 +226,6 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
@@ -244,28 +233,24 @@ atomic_write_u64(uint64_t *p, uint64_t x)
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
__sync_lock_test_and_set(p, x);
}
# else
@@ -325,7 +310,6 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
asm volatile (
"xchgl %1, %0;" /* Lock is implied by xchgl. */
: "=m" (*p), "+r" (x) /* Outputs. */
@@ -365,49 +349,42 @@ atomic_write_u32(uint32_t *p, uint32_t x)
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!atomic_cmpset_32(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}
@@ -425,14 +402,12 @@ atomic_write_u32(uint32_t *p, uint32_t x)
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, x) + x);
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}
@@ -448,7 +423,6 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
@@ -456,28 +430,24 @@ atomic_write_u32(uint32_t *p, uint32_t x)
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
return (__sync_add_and_fetch(p, x));
}
JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
return (__sync_sub_and_fetch(p, x));
}
JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
return (!__sync_bool_compare_and_swap(p, c, s));
}
JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
__sync_lock_test_and_set(p, x);
}
#else
@@ -489,7 +459,6 @@ atomic_write_u32(uint32_t *p, uint32_t x)
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
@@ -500,7 +469,6 @@ atomic_add_p(void **p, void *x)
JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
@@ -511,7 +479,6 @@ atomic_sub_p(void **p, void *x)
JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
@@ -522,7 +489,6 @@ atomic_cas_p(void **p, void *c, void *s)
JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
@@ -535,7 +501,6 @@ atomic_write_p(void **p, const void *x)
JEMALLOC_INLINE size_t
atomic_add_zu(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
@@ -546,7 +511,6 @@ atomic_add_zu(size_t *p, size_t x)
JEMALLOC_INLINE size_t
atomic_sub_zu(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
@@ -557,7 +521,6 @@ atomic_sub_zu(size_t *p, size_t x)
JEMALLOC_INLINE bool
atomic_cas_zu(size_t *p, size_t c, size_t s)
{
#if (LG_SIZEOF_PTR == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
@@ -568,7 +531,6 @@ atomic_cas_zu(size_t *p, size_t c, size_t s)
JEMALLOC_INLINE void
atomic_write_zu(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
@@ -581,7 +543,6 @@ atomic_write_zu(size_t *p, size_t x)
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
@@ -592,7 +553,6 @@ atomic_add_u(unsigned *p, unsigned x)
JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
return ((unsigned)atomic_add_u64((uint64_t *)p,
(uint64_t)-((int64_t)x)));
@@ -605,7 +565,6 @@ atomic_sub_u(unsigned *p, unsigned x)
JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
#if (LG_SIZEOF_INT == 3)
return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
@@ -616,7 +575,6 @@ atomic_cas_u(unsigned *p, unsigned c, unsigned s)
JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)


@@ -9,7 +9,6 @@ unsigned base_ind_get(const base_t *base);
JEMALLOC_INLINE unsigned
base_ind_get(const base_t *base)
{
return (base->ind);
}
#endif


@@ -55,14 +55,12 @@ extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
JEMALLOC_INLINE arena_t *
extent_arena_get(const extent_t *extent)
{
return (extent->e_arena);
}
JEMALLOC_INLINE void *
extent_base_get(const extent_t *extent)
{
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (PAGE_ADDR2BASE(extent->e_addr));
@@ -71,7 +69,6 @@ extent_base_get(const extent_t *extent)
JEMALLOC_INLINE void *
extent_addr_get(const extent_t *extent)
{
assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
!extent->e_slab);
return (extent->e_addr);
@@ -80,14 +77,12 @@ extent_addr_get(const extent_t *extent)
JEMALLOC_INLINE size_t
extent_size_get(const extent_t *extent)
{
return (extent->e_size);
}
JEMALLOC_INLINE size_t
extent_usize_get(const extent_t *extent)
{
assert(!extent->e_slab);
return (extent->e_usize);
}
@@ -95,14 +90,12 @@ extent_usize_get(const extent_t *extent)
JEMALLOC_INLINE void *
extent_before_get(const extent_t *extent)
{
return ((void *)((uintptr_t)extent_base_get(extent) - PAGE));
}
JEMALLOC_INLINE void *
extent_last_get(const extent_t *extent)
{
return ((void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent) - PAGE));
}
@@ -110,7 +103,6 @@ extent_last_get(const extent_t *extent)
JEMALLOC_INLINE void *
extent_past_get(const extent_t *extent)
{
return ((void *)((uintptr_t)extent_base_get(extent) +
extent_size_get(extent)));
}
@@ -118,49 +110,42 @@ extent_past_get(const extent_t *extent)
JEMALLOC_INLINE size_t
extent_sn_get(const extent_t *extent)
{
return (extent->e_sn);
}
JEMALLOC_INLINE bool
extent_active_get(const extent_t *extent)
{
return (extent->e_active);
}
JEMALLOC_INLINE bool
extent_retained_get(const extent_t *extent)
{
return (qr_next(extent, qr_link) == extent);
}
JEMALLOC_INLINE bool
extent_zeroed_get(const extent_t *extent)
{
return (extent->e_zeroed);
}
JEMALLOC_INLINE bool
extent_committed_get(const extent_t *extent)
{
return (extent->e_committed);
}
JEMALLOC_INLINE bool
extent_slab_get(const extent_t *extent)
{
return (extent->e_slab);
}
JEMALLOC_INLINE arena_slab_data_t *
extent_slab_data_get(extent_t *extent)
{
assert(extent->e_slab);
return (&extent->e_slab_data);
}
@@ -168,7 +153,6 @@ extent_slab_data_get(extent_t *extent)
JEMALLOC_INLINE const arena_slab_data_t *
extent_slab_data_get_const(const extent_t *extent)
{
assert(extent->e_slab);
return (&extent->e_slab_data);
}
@@ -176,7 +160,6 @@ extent_slab_data_get_const(const extent_t *extent)
JEMALLOC_INLINE prof_tctx_t *
extent_prof_tctx_get(const extent_t *extent)
{
return ((prof_tctx_t *)atomic_read_p(
&((extent_t *)extent)->e_prof_tctx_pun));
}
@@ -184,21 +167,18 @@ extent_prof_tctx_get(const extent_t *extent)
JEMALLOC_INLINE void
extent_arena_set(extent_t *extent, arena_t *arena)
{
extent->e_arena = arena;
}
JEMALLOC_INLINE void
extent_addr_set(extent_t *extent, void *addr)
{
extent->e_addr = addr;
}
JEMALLOC_INLINE void
extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
{
assert(extent_base_get(extent) == extent_addr_get(extent));
if (alignment < PAGE) {
@@ -219,56 +199,48 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
JEMALLOC_INLINE void
extent_size_set(extent_t *extent, size_t size)
{
extent->e_size = size;
}
JEMALLOC_INLINE void
extent_usize_set(extent_t *extent, size_t usize)
{
extent->e_usize = usize;
}
JEMALLOC_INLINE void
extent_sn_set(extent_t *extent, size_t sn)
{
extent->e_sn = sn;
}
JEMALLOC_INLINE void
extent_active_set(extent_t *extent, bool active)
{
extent->e_active = active;
}
JEMALLOC_INLINE void
extent_zeroed_set(extent_t *extent, bool zeroed)
{
extent->e_zeroed = zeroed;
}
JEMALLOC_INLINE void
extent_committed_set(extent_t *extent, bool committed)
{
extent->e_committed = committed;
}
JEMALLOC_INLINE void
extent_slab_set(extent_t *extent, bool slab)
{
extent->e_slab = slab;
}
JEMALLOC_INLINE void
extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
{
atomic_write_p(&extent->e_prof_tctx_pun, tctx);
}
@@ -277,7 +249,6 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
size_t usize, size_t sn, bool active, bool zeroed, bool committed,
bool slab)
{
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
extent_arena_set(extent, arena);
@@ -297,14 +268,12 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
JEMALLOC_INLINE void
extent_ring_insert(extent_t *sentinel, extent_t *extent)
{
qr_meld(sentinel, extent, extent_t, qr_link);
}
JEMALLOC_INLINE void
extent_ring_remove(extent_t *extent)
{
qr_remove(extent, qr_link);
}


@@ -23,21 +23,18 @@ void hash(const void *key, size_t len, const uint32_t seed,
JEMALLOC_INLINE uint32_t
hash_rotl_32(uint32_t x, int8_t r)
{
return ((x << r) | (x >> (32 - r)));
}
JEMALLOC_INLINE uint64_t
hash_rotl_64(uint64_t x, int8_t r)
{
return ((x << r) | (x >> (64 - r)));
}
JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
@@ -52,7 +49,6 @@ hash_get_block_32(const uint32_t *p, int i)
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
/* Handle unaligned read. */
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
@@ -67,7 +63,6 @@ hash_get_block_64(const uint64_t *p, int i)
JEMALLOC_INLINE uint32_t
hash_fmix_32(uint32_t h)
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
@@ -80,7 +75,6 @@ hash_fmix_32(uint32_t h)
JEMALLOC_INLINE uint64_t
hash_fmix_64(uint64_t k)
{
k ^= k >> 33;
k *= KQU(0xff51afd7ed558ccd);
k ^= k >> 33;
@@ -326,7 +320,6 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
JEMALLOC_INLINE void
hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
{
assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
#if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))


@@ -553,7 +553,6 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
JEMALLOC_ALWAYS_INLINE pszind_t
psz2ind(size_t psz)
{
if (unlikely(psz > LARGE_MAXCLASS))
return (NPSIZES);
{
@@ -577,7 +576,6 @@ psz2ind(size_t psz)
JEMALLOC_INLINE size_t
pind2sz_compute(pszind_t pind)
{
if (unlikely(pind == NPSIZES))
return (LARGE_MAXCLASS + PAGE);
{
@@ -608,7 +606,6 @@ pind2sz_lookup(pszind_t pind)
JEMALLOC_INLINE size_t
pind2sz(pszind_t pind)
{
assert(pind < NPSIZES+1);
return (pind2sz_lookup(pind));
}
@@ -616,7 +613,6 @@ pind2sz(pszind_t pind)
JEMALLOC_INLINE size_t
psz2u(size_t psz)
{
if (unlikely(psz > LARGE_MAXCLASS))
return (LARGE_MAXCLASS + PAGE);
{
@@ -633,7 +629,6 @@ psz2u(size_t psz)
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
if (unlikely(size > LARGE_MAXCLASS))
return (NSIZES);
#if (NTBINS != 0)
@@ -664,7 +659,6 @@ size2index_compute(size_t size)
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
assert(size <= LOOKUP_MAXCLASS);
{
szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
@@ -676,7 +670,6 @@ size2index_lookup(size_t size)
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (size2index_lookup(size));
@@ -686,7 +679,6 @@ size2index(size_t size)
JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
if (index < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
@@ -721,7 +713,6 @@ index2size_lookup(szind_t index)
JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{
assert(index < NSIZES);
return (index2size_lookup(index));
}
@@ -729,7 +720,6 @@ index2size(szind_t index)
JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{
if (unlikely(size > LARGE_MAXCLASS))
return (0);
#if (NTBINS > 0)
@@ -767,7 +757,6 @@ s2u_lookup(size_t size)
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (s2u_lookup(size));
@@ -852,14 +841,12 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
return (arena_choose_impl(tsd, arena, false));
}
JEMALLOC_INLINE arena_t *
arena_ichoose(tsd_t *tsd, arena_t *arena)
{
return (arena_choose_impl(tsd, arena, true));
}
@@ -933,7 +920,6 @@ extent_t *iealloc(tsdn_t *tsdn, const void *ptr);
JEMALLOC_ALWAYS_INLINE extent_t *
iealloc(tsdn_t *tsdn, const void *ptr)
{
return (extent_lookup(tsdn, ptr, true));
}
#endif
@@ -975,7 +961,6 @@ bool ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(tsdn_t *tsdn, const void *ptr)
{
assert(ptr != NULL);
return (arena_aalloc(tsdn, ptr));
@@ -991,7 +976,6 @@ iaalloc(tsdn_t *tsdn, const void *ptr)
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
assert(ptr != NULL);
return (arena_salloc(tsdn, extent, ptr));
@@ -1019,7 +1003,6 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
JEMALLOC_ALWAYS_INLINE void *
ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{
return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
false, NULL, slow_path));
}
@@ -1049,14 +1032,12 @@ JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
tcache_get(tsd, true), false, NULL));
}
@@ -1088,7 +1069,6 @@ JEMALLOC_ALWAYS_INLINE void
idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
bool is_internal, bool slow_path)
{
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
@@ -1104,7 +1084,6 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, extent_t *extent, void *ptr)
{
idalloctm(tsd_tsdn(tsd), extent, ptr, tcache_get(tsd, false), false,
true);
}
@@ -1113,7 +1092,6 @@ JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
tcache_t *tcache, bool slow_path)
{
arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
}
@@ -1154,7 +1132,6 @@ JEMALLOC_ALWAYS_INLINE void *
iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
assert(ptr != NULL);
assert(size != 0);
@@ -1176,7 +1153,6 @@ JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero)
{
return (iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
zero, tcache_get(tsd, true), NULL));
}
@@ -1185,7 +1161,6 @@ JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero)
{
assert(ptr != NULL);
assert(size != 0);


@@ -63,7 +63,6 @@ typedef intptr_t ssize_t;
static int
isblank(int c)
{
return (c == '\t' || c == ' ');
}
#endif


@@ -12,7 +12,6 @@ void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
@@ -35,7 +34,6 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
@@ -57,7 +55,6 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_owner(tsdn, &mutex->witness);
}
@@ -65,7 +62,6 @@ malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_not_owner(tsdn, &mutex->witness);
}


@@ -207,19 +207,16 @@ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
if (ph->ph_root == NULL) \
return (NULL); \
ph_merge_aux(a_type, a_field, ph, a_cmp); \
@@ -228,7 +225,6 @@ a_prefix##first(a_ph_type *ph) \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
/* \


@@ -20,21 +20,18 @@ size_t prng_range_zu(size_t *state, size_t range, bool atomic);
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{
return ((state * PRNG_A_32) + PRNG_C_32);
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{
return ((state * PRNG_A_64) + PRNG_C_64);
}
JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{
#if LG_SIZEOF_PTR == 2
return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3


@@ -29,7 +29,6 @@ void prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr,
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
/*
* Even if opt_prof is true, sampling can be temporarily disabled by
* setting prof_active to false. No locking is used when reading
@@ -42,7 +41,6 @@ prof_active_get_unlocked(void)
JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
/*
* No locking is used when reading prof_gdump_val in the fast path, so
* there are no guarantees regarding how long it will take for all
@@ -78,7 +76,6 @@ prof_tdata_get(tsd_t *tsd, bool create)
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
@@ -89,7 +86,6 @@ JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
@@ -100,7 +96,6 @@ JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
@@ -162,7 +157,6 @@ JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, extent, ptr));


@@ -74,7 +74,6 @@ rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
rtree->levels[level].cumbits)) & ((ZU(1) <<
rtree->levels[level].bits) - 1));
@@ -83,7 +82,6 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_elm_t *node)
{
return ((uintptr_t)node != (uintptr_t)0);
}
@@ -144,7 +142,6 @@ rtree_elm_read(rtree_elm_t *elm, bool dependent)
JEMALLOC_INLINE void
rtree_elm_write(rtree_elm_t *elm, const extent_t *extent)
{
atomic_write_p(&elm->pun, extent);
}
@@ -408,7 +405,6 @@ JEMALLOC_INLINE void
rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm,
const extent_t *extent)
{
assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);
@@ -422,7 +418,6 @@ rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm,
JEMALLOC_INLINE void
rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
{
rtree_elm_write(elm, rtree_elm_read_acquired(tsdn, rtree, elm));
if (config_debug)
rtree_elm_witness_release(tsdn, rtree, elm);


@@ -10,7 +10,6 @@ void spin_adaptive(spin_t *spin);
JEMALLOC_INLINE void
spin_init(spin_t *spin)
{
spin->iteration = 0;
}


@@ -88,7 +88,6 @@ tcache_get(tsd_t *tsd, bool create)
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{
if (TCACHE_GC_INCR == 0)
return;


@@ -13,7 +13,6 @@ bool ticker_tick(ticker_t *ticker);
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{
ticker->tick = nticks;
ticker->nticks = nticks;
}
@@ -21,21 +20,18 @@ ticker_init(ticker_t *ticker, int32_t nticks)
JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{
*ticker = *other;
}
JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{
return (ticker->tick);
}
JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{
if (unlikely(ticker->tick < nticks)) {
ticker->tick = ticker->nticks;
return (true);
@@ -47,7 +43,6 @@ ticker_ticks(ticker_t *ticker, int32_t nticks)
JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{
return (ticker_ticks(ticker, 1));
}
#endif


@@ -51,21 +51,18 @@ tsd_fetch_impl(bool init)
JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
return (tsd_fetch_impl(true));
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
return ((tsdn_t *)tsd);
}
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
return (tsd->state == tsd_state_nominal);
}
@@ -73,21 +70,18 @@ tsd_nominal(tsd_t *tsd)
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
return (&tsd->n); \
} \
\
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
return (*tsd_##n##p_get(tsd)); \
} \
\
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
assert(tsd->state == tsd_state_nominal); \
tsd->n = n; \
}
@@ -97,7 +91,6 @@ MALLOC_TSD
JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
if (!tsd_booted_get())
return (NULL);
@@ -107,14 +100,12 @@ tsdn_fetch(void)
JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
return (tsdn == NULL);
}
JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
assert(!tsdn_null(tsdn));
return (&tsdn->tsd);
@@ -123,7 +114,6 @@ tsdn_tsd(tsdn_t *tsdn)
JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback)
{
/*
* If tsd cannot be accessed, initialize the fallback rtree_ctx and
* return a pointer to it.


@@ -177,7 +177,6 @@ a_attr bool a_name##tsd_booted = false;
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
if (a_name##tsd_initialized) { \
a_name##tsd_initialized = false; \
a_cleanup(&a_name##tsd_tls); \
@@ -187,7 +186,6 @@ a_name##tsd_cleanup_wrapper(void) \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
if (a_cleanup != malloc_tsd_no_cleanup) { \
malloc_tsd_cleanup_register( \
&a_name##tsd_cleanup_wrapper); \
@@ -198,39 +196,33 @@ a_name##tsd_boot0(void) \
a_attr void \
a_name##tsd_boot1(void) \
{ \
/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
assert(a_name##tsd_booted); \
if (likely(&a_name##tsd_tls != val)) \
a_name##tsd_tls = (*val); \
@@ -244,7 +236,6 @@ a_name##tsd_set(a_type *val) \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
if (a_cleanup != malloc_tsd_no_cleanup) { \
if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
0) \
@@ -256,39 +247,33 @@ a_name##tsd_boot0(void) \
a_attr void \
a_name##tsd_boot1(void) \
{ \
/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
assert(a_name##tsd_booted); \
return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
assert(a_name##tsd_booted); \
if (likely(&a_name##tsd_tls != val)) \
a_name##tsd_tls = (*val); \
@@ -331,7 +316,6 @@ a_name##tsd_cleanup_wrapper(void) \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
" TSD for "#a_name"\n"); \
@@ -364,7 +348,6 @@ a_name##tsd_wrapper_get(bool init) \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
a_name##tsd_tsd = TlsAlloc(); \
if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
return (true); \
@@ -394,7 +377,6 @@ a_name##tsd_boot1(void) \
a_attr bool \
a_name##tsd_boot(void) \
{ \
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
@@ -403,13 +385,11 @@ a_name##tsd_boot(void) \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
return (true); \
} \
/* Get/set. */ \
@@ -466,7 +446,6 @@ a_name##tsd_cleanup_wrapper(void *arg) \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
if (pthread_setspecific(a_name##tsd_tsd, \
(void *)wrapper)) { \
malloc_write("<jemalloc>: Error setting" \
@@ -506,7 +485,6 @@ a_name##tsd_wrapper_get(bool init) \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
if (pthread_key_create(&a_name##tsd_tsd, \
a_name##tsd_cleanup_wrapper) != 0) \
return (true); \
@@ -532,7 +510,6 @@ a_name##tsd_boot1(void) \
a_attr bool \
a_name##tsd_boot(void) \
{ \
if (a_name##tsd_boot0()) \
return (true); \
a_name##tsd_boot1(); \
@@ -541,13 +518,11 @@ a_name##tsd_boot(void) \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
return (true); \
} \
/* Get/set. */ \


@@ -27,28 +27,24 @@ int get_errno(void);
JEMALLOC_ALWAYS_INLINE unsigned
ffs_llu(unsigned long long bitmap)
{
return (JEMALLOC_INTERNAL_FFSLL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_lu(unsigned long bitmap)
{
return (JEMALLOC_INTERNAL_FFSL(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u(unsigned bitmap)
{
return (JEMALLOC_INTERNAL_FFS(bitmap));
}
JEMALLOC_ALWAYS_INLINE unsigned
ffs_zu(size_t bitmap)
{
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
return (ffs_u(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
@@ -63,7 +59,6 @@ ffs_zu(size_t bitmap)
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u64(uint64_t bitmap)
{
#if LG_SIZEOF_LONG == 3
return (ffs_lu(bitmap));
#elif LG_SIZEOF_LONG_LONG == 3
@@ -76,7 +71,6 @@ ffs_u64(uint64_t bitmap)
JEMALLOC_ALWAYS_INLINE unsigned
ffs_u32(uint32_t bitmap)
{
#if LG_SIZEOF_INT == 2
return (ffs_u(bitmap));
#else
@@ -88,7 +82,6 @@ ffs_u32(uint32_t bitmap)
JEMALLOC_INLINE uint64_t
pow2_ceil_u64(uint64_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
@@ -103,7 +96,6 @@ pow2_ceil_u64(uint64_t x)
JEMALLOC_INLINE uint32_t
pow2_ceil_u32(uint32_t x)
{
x--;
x |= x >> 1;
x |= x >> 2;
@@ -118,7 +110,6 @@ pow2_ceil_u32(uint32_t x)
JEMALLOC_INLINE size_t
pow2_ceil_zu(size_t x)
{
#if (LG_SIZEOF_PTR == 3)
return (pow2_ceil_u64(x));
#else
@@ -163,7 +154,6 @@ lg_floor(size_t x)
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
assert(x != 0);
#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
@@ -178,7 +168,6 @@ lg_floor(size_t x)
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
assert(x != 0);
x |= (x >> 1);
@@ -200,7 +189,6 @@ lg_floor(size_t x)
JEMALLOC_INLINE void
set_errno(int errnum)
{
#ifdef _WIN32
SetLastError(errnum);
#else
@@ -212,7 +200,6 @@ set_errno(int errnum)
JEMALLOC_INLINE int
get_errno(void)
{
#ifdef _WIN32
return (GetLastError());
#else


@@ -17,7 +17,6 @@ static __forceinline int ffsl(long x)
static __forceinline int ffs(int x)
{
return (ffsl(x));
}