Update brace style.
Add braces around single-line blocks, and remove line breaks before function-opening braces. This resolves #537.
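Concretely, the change applies two rules: the opening brace of a function moves onto the same line as its signature, and every conditional or loop body gets braces even when it is a single statement. A minimal before/after sketch (the function below is a made-up illustration, not code from this diff):

Before:

	static bool
	example_is_zero(int x)
	{
		if (x == 0)
			return (true);
		return (false);
	}

After:

	static bool
	example_is_zero(int x) {
		if (x == 0) {
			return (true);
		}
		return (false);
	}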
@@ -14,32 +14,27 @@ bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 
 JEMALLOC_INLINE unsigned
-arena_ind_get(const arena_t *arena)
-{
+arena_ind_get(const arena_t *arena) {
 	return (base_ind_get(arena->base));
 }
 
 JEMALLOC_INLINE void
-arena_internal_add(arena_t *arena, size_t size)
-{
+arena_internal_add(arena_t *arena, size_t size) {
 	atomic_add_zu(&arena->stats.internal, size);
 }
 
 JEMALLOC_INLINE void
-arena_internal_sub(arena_t *arena, size_t size)
-{
+arena_internal_sub(arena_t *arena, size_t size) {
 	atomic_sub_zu(&arena->stats.internal, size);
 }
 
 JEMALLOC_INLINE size_t
-arena_internal_get(arena_t *arena)
-{
+arena_internal_get(arena_t *arena) {
 	return (atomic_read_zu(&arena->stats.internal));
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
-{
+arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) {
 	cassert(config_prof);
 	assert(prof_interval != 0);
 
@@ -52,22 +47,22 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
-{
+arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) {
 	cassert(config_prof);
 
-	if (likely(prof_interval == 0))
+	if (likely(prof_interval == 0)) {
 		return (false);
+	}
 	return (arena_prof_accum_impl(arena, accumbytes));
 }
 
 JEMALLOC_INLINE bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
-{
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
 	cassert(config_prof);
 
-	if (likely(prof_interval == 0))
+	if (likely(prof_interval == 0)) {
 		return (false);
+	}
 
 	{
 		bool ret;
@@ -23,39 +23,37 @@ void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 JEMALLOC_INLINE szind_t
-arena_bin_index(arena_t *arena, arena_bin_t *bin)
-{
+arena_bin_index(arena_t *arena, arena_bin_t *bin) {
 	szind_t binind = (szind_t)(bin - arena->bins);
 	assert(binind < NBINS);
 	return (binind);
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	if (unlikely(!extent_slab_get(extent)))
+	if (unlikely(!extent_slab_get(extent))) {
 		return (large_prof_tctx_get(tsdn, extent));
+	}
 	return ((prof_tctx_t *)(uintptr_t)1U);
 }
 
 JEMALLOC_INLINE void
 arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    size_t usize, prof_tctx_t *tctx)
-{
+    size_t usize, prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	if (unlikely(!extent_slab_get(extent)))
+	if (unlikely(!extent_slab_get(extent))) {
 		large_prof_tctx_set(tsdn, extent, tctx);
+	}
 }
 
 JEMALLOC_INLINE void
 arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    prof_tctx_t *tctx)
-{
+    prof_tctx_t *tctx) {
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(!extent_slab_get(extent));
@@ -64,24 +62,25 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
-{
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
 	tsd_t *tsd;
 	ticker_t *decay_ticker;
 
-	if (unlikely(tsdn_null(tsdn)))
+	if (unlikely(tsdn_null(tsdn))) {
 		return;
+	}
 	tsd = tsdn_tsd(tsdn);
 	decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
-	if (unlikely(decay_ticker == NULL))
+	if (unlikely(decay_ticker == NULL)) {
 		return;
-	if (unlikely(ticker_ticks(decay_ticker, nticks)))
+	}
+	if (unlikely(ticker_ticks(decay_ticker, nticks))) {
 		arena_purge(tsdn, arena, false);
+	}
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
-{
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
 	malloc_mutex_assert_not_owner(tsdn, &arena->lock);
 
 	arena_decay_ticks(tsdn, arena, 1);
@@ -89,8 +88,7 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
 
 JEMALLOC_ALWAYS_INLINE void *
 arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool slow_path)
-{
+    tcache_t *tcache, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(size != 0);
 
@@ -111,31 +109,29 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 }
 
 JEMALLOC_ALWAYS_INLINE arena_t *
-arena_aalloc(tsdn_t *tsdn, const void *ptr)
-{
+arena_aalloc(tsdn_t *tsdn, const void *ptr) {
 	return (extent_arena_get(iealloc(tsdn, ptr)));
 }
 
 /* Return the size of the allocation pointed to by ptr. */
 JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
 	size_t ret;
 
 	assert(ptr != NULL);
 
-	if (likely(extent_slab_get(extent)))
+	if (likely(extent_slab_get(extent))) {
 		ret = index2size(extent_slab_data_get_const(extent)->binind);
-	else
+	} else {
 		ret = large_salloc(tsdn, extent);
+	}
 
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool slow_path)
-{
+    bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
 
@@ -160,15 +156,15 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
 				tcache_dalloc_large(tsdn_tsd(tsdn), tcache,
 				    ptr, usize, slow_path);
 			}
-		} else
+		} else {
 			large_dalloc(tsdn, extent);
+		}
 	}
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path)
-{
+    tcache_t *tcache, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
 
@@ -192,8 +188,9 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
 				tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
 				    size, slow_path);
 			}
-		} else
+		} else {
 			large_dalloc(tsdn, extent);
+		}
 	}
 }
@@ -37,8 +37,9 @@
 
 #ifndef assert_not_implemented
 #define assert_not_implemented(e) do {					\
-	if (unlikely(config_debug && !(e)))				\
+	if (unlikely(config_debug && !(e))) {				\
 		not_implemented();					\
+	}								\
 } while (0)
 #endif
@@ -53,8 +53,7 @@ void atomic_write_u(unsigned *p, unsigned x);
 #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
 # if (defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
 	uint64_t t = x;
 
 	asm volatile (
@@ -67,8 +66,7 @@ atomic_add_u64(uint64_t *p, uint64_t x)
 }
 
 JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
 	uint64_t t;
 
 	x = (uint64_t)(-(int64_t)x);
@@ -83,8 +81,7 @@ atomic_sub_u64(uint64_t *p, uint64_t x)
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
 	uint8_t success;
 
 	asm volatile (
@@ -99,8 +96,7 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
 	asm volatile (
 	    "xchgq %1, %0;" /* Lock is implied by xchgq. */
 	    : "=m" (*p), "+r" (x) /* Outputs. */
@@ -110,36 +106,31 @@ atomic_write_u64(uint64_t *p, uint64_t x)
 }
 # elif (defined(JEMALLOC_C11ATOMICS))
 JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
 	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
 	return (atomic_fetch_add(a, x) + x);
 }
 
 JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
 	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
 	return (atomic_fetch_sub(a, x) - x);
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
 	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
 	return (!atomic_compare_exchange_strong(a, &c, s));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
 	volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
 	atomic_store(a, x);
 }
 # elif (defined(JEMALLOC_ATOMIC9))
 JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
 	/*
 	 * atomic_fetchadd_64() doesn't exist, but we only ever use this
 	 * function on LP64 systems, so atomic_fetchadd_long() will do.
@@ -150,50 +141,43 @@ atomic_add_u64(uint64_t *p, uint64_t x)
 }
 
 JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
 	assert(sizeof(uint64_t) == sizeof(unsigned long));
 
 	return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
 	assert(sizeof(uint64_t) == sizeof(unsigned long));
 
 	return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
 	assert(sizeof(uint64_t) == sizeof(unsigned long));
 
 	atomic_store_rel_long(p, x);
 }
 # elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
 	return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
 }
 
 JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
 	return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
 	return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
 	uint64_t o;
 
 	/*The documented OSAtomic*() API does not expose an atomic exchange. */
@@ -203,20 +187,17 @@ atomic_write_u64(uint64_t *p, uint64_t x)
 }
 # elif (defined(_MSC_VER))
 JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
 	return (InterlockedExchangeAdd64(p, x) + x);
 }
 
 JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
 	return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
 	uint64_t o;
 
 	o = InterlockedCompareExchange64(p, s, c);
@@ -224,33 +205,28 @@ atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
 	InterlockedExchange64(p, x);
 }
 # elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
     defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
 JEMALLOC_INLINE uint64_t
-atomic_add_u64(uint64_t *p, uint64_t x)
-{
+atomic_add_u64(uint64_t *p, uint64_t x) {
 	return (__sync_add_and_fetch(p, x));
 }
 
 JEMALLOC_INLINE uint64_t
-atomic_sub_u64(uint64_t *p, uint64_t x)
-{
+atomic_sub_u64(uint64_t *p, uint64_t x) {
 	return (__sync_sub_and_fetch(p, x));
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
-{
+atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s) {
 	return (!__sync_bool_compare_and_swap(p, c, s));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u64(uint64_t *p, uint64_t x)
-{
+atomic_write_u64(uint64_t *p, uint64_t x) {
 	__sync_lock_test_and_set(p, x);
 }
 # else
@@ -262,8 +238,7 @@ atomic_write_u64(uint64_t *p, uint64_t x)
 /******************************************************************************/
 /* 32-bit operations. */
 #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
 JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
 	uint32_t t = x;
 
 	asm volatile (
@@ -276,8 +251,7 @@ atomic_add_u32(uint32_t *p, uint32_t x)
 }
 
 JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
 	uint32_t t;
 
 	x = (uint32_t)(-(int32_t)x);
@@ -292,8 +266,7 @@ atomic_sub_u32(uint32_t *p, uint32_t x)
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
 	uint8_t success;
 
 	asm volatile (
@@ -308,8 +281,7 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
 	asm volatile (
 	    "xchgl %1, %0;" /* Lock is implied by xchgl. */
 	    : "=m" (*p), "+r" (x) /* Outputs. */
@@ -319,78 +291,66 @@ atomic_write_u32(uint32_t *p, uint32_t x)
 }
 # elif (defined(JEMALLOC_C11ATOMICS))
 JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
 	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
 	return (atomic_fetch_add(a, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
 	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
 	return (atomic_fetch_sub(a, x) - x);
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
 	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
 	return (!atomic_compare_exchange_strong(a, &c, s));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
 	volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
 	atomic_store(a, x);
 }
 #elif (defined(JEMALLOC_ATOMIC9))
 JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
 	return (atomic_fetchadd_32(p, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
 	return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
 	return (!atomic_cmpset_32(p, c, s));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
 	atomic_store_rel_32(p, x);
 }
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
 	return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
 }
 
 JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
 	return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
 	return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
 	uint32_t o;
 
 	/*The documented OSAtomic*() API does not expose an atomic exchange. */
@@ -400,20 +360,17 @@ atomic_write_u32(uint32_t *p, uint32_t x)
 }
 #elif (defined(_MSC_VER))
 JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
 	return (InterlockedExchangeAdd(p, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
 	return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
 	uint32_t o;
 
 	o = InterlockedCompareExchange(p, s, c);
@@ -421,33 +378,28 @@ atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
 	InterlockedExchange(p, x);
 }
 #elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
 JEMALLOC_INLINE uint32_t
-atomic_add_u32(uint32_t *p, uint32_t x)
-{
+atomic_add_u32(uint32_t *p, uint32_t x) {
 	return (__sync_add_and_fetch(p, x));
 }
 
 JEMALLOC_INLINE uint32_t
-atomic_sub_u32(uint32_t *p, uint32_t x)
-{
+atomic_sub_u32(uint32_t *p, uint32_t x) {
 	return (__sync_sub_and_fetch(p, x));
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
-{
+atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s) {
 	return (!__sync_bool_compare_and_swap(p, c, s));
 }
 
 JEMALLOC_INLINE void
-atomic_write_u32(uint32_t *p, uint32_t x)
-{
+atomic_write_u32(uint32_t *p, uint32_t x) {
 	__sync_lock_test_and_set(p, x);
 }
 #else
@@ -457,8 +409,7 @@ atomic_write_u32(uint32_t *p, uint32_t x)
 /******************************************************************************/
 /* Pointer operations. */
 JEMALLOC_INLINE void *
-atomic_add_p(void **p, void *x)
-{
+atomic_add_p(void **p, void *x) {
 #if (LG_SIZEOF_PTR == 3)
 	return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)x));
 #elif (LG_SIZEOF_PTR == 2)
@@ -467,8 +418,7 @@ atomic_add_p(void **p, void *x)
 }
 
 JEMALLOC_INLINE void *
-atomic_sub_p(void **p, void *x)
-{
+atomic_sub_p(void **p, void *x) {
 #if (LG_SIZEOF_PTR == 3)
 	return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
 #elif (LG_SIZEOF_PTR == 2)
@@ -477,8 +427,7 @@ atomic_sub_p(void **p, void *x)
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_p(void **p, void *c, void *s)
-{
+atomic_cas_p(void **p, void *c, void *s) {
 #if (LG_SIZEOF_PTR == 3)
 	return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
 #elif (LG_SIZEOF_PTR == 2)
@@ -487,8 +436,7 @@ atomic_cas_p(void **p, void *c, void *s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_p(void **p, const void *x)
-{
+atomic_write_p(void **p, const void *x) {
 #if (LG_SIZEOF_PTR == 3)
 	atomic_write_u64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_PTR == 2)
@@ -499,8 +447,7 @@ atomic_write_p(void **p, const void *x)
 /******************************************************************************/
 /* size_t operations. */
 JEMALLOC_INLINE size_t
-atomic_add_zu(size_t *p, size_t x)
-{
+atomic_add_zu(size_t *p, size_t x) {
 #if (LG_SIZEOF_PTR == 3)
 	return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x));
 #elif (LG_SIZEOF_PTR == 2)
@@ -509,8 +456,7 @@ atomic_add_zu(size_t *p, size_t x)
 }
 
 JEMALLOC_INLINE size_t
-atomic_sub_zu(size_t *p, size_t x)
-{
+atomic_sub_zu(size_t *p, size_t x) {
 #if (LG_SIZEOF_PTR == 3)
 	return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
 #elif (LG_SIZEOF_PTR == 2)
@@ -519,8 +465,7 @@ atomic_sub_zu(size_t *p, size_t x)
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_zu(size_t *p, size_t c, size_t s)
-{
+atomic_cas_zu(size_t *p, size_t c, size_t s) {
 #if (LG_SIZEOF_PTR == 3)
 	return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
 #elif (LG_SIZEOF_PTR == 2)
@@ -529,8 +474,7 @@ atomic_cas_zu(size_t *p, size_t c, size_t s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_zu(size_t *p, size_t x)
-{
+atomic_write_zu(size_t *p, size_t x) {
 #if (LG_SIZEOF_PTR == 3)
 	atomic_write_u64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_PTR == 2)
@@ -541,8 +485,7 @@ atomic_write_zu(size_t *p, size_t x)
 /******************************************************************************/
 /* unsigned operations. */
 JEMALLOC_INLINE unsigned
-atomic_add_u(unsigned *p, unsigned x)
-{
+atomic_add_u(unsigned *p, unsigned x) {
 #if (LG_SIZEOF_INT == 3)
 	return ((unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x));
 #elif (LG_SIZEOF_INT == 2)
@@ -551,8 +494,7 @@ atomic_add_u(unsigned *p, unsigned x)
 }
 
 JEMALLOC_INLINE unsigned
-atomic_sub_u(unsigned *p, unsigned x)
-{
+atomic_sub_u(unsigned *p, unsigned x) {
 #if (LG_SIZEOF_INT == 3)
 	return ((unsigned)atomic_add_u64((uint64_t *)p,
 	    (uint64_t)-((int64_t)x)));
@@ -563,8 +505,7 @@ atomic_sub_u(unsigned *p, unsigned x)
 }
 
 JEMALLOC_INLINE bool
-atomic_cas_u(unsigned *p, unsigned c, unsigned s)
-{
+atomic_cas_u(unsigned *p, unsigned c, unsigned s) {
 #if (LG_SIZEOF_INT == 3)
 	return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
 #elif (LG_SIZEOF_INT == 2)
@@ -573,8 +514,7 @@ atomic_cas_u(unsigned *p, unsigned c, unsigned s)
 }
 
 JEMALLOC_INLINE void
-atomic_write_u(unsigned *p, unsigned x)
-{
+atomic_write_u(unsigned *p, unsigned x) {
 #if (LG_SIZEOF_INT == 3)
 	atomic_write_u64((uint64_t *)p, (uint64_t)x);
 #elif (LG_SIZEOF_INT == 2)
@@ -7,8 +7,7 @@ unsigned base_ind_get(const base_t *base);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BASE_C_))
 JEMALLOC_INLINE unsigned
-base_ind_get(const base_t *base)
-{
+base_ind_get(const base_t *base) {
 	return (base->ind);
 }
 #endif
@@ -11,8 +11,7 @@ void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_))
 JEMALLOC_INLINE bool
-bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 #ifdef BITMAP_USE_TREE
 	size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1;
 	bitmap_t rg = bitmap[rgoff];
@@ -22,16 +21,16 @@ bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo)
 	size_t i;
 
 	for (i = 0; i < binfo->ngroups; i++) {
-		if (bitmap[i] != 0)
+		if (bitmap[i] != 0) {
 			return (false);
+		}
 	}
 	return (true);
 #endif
 }
 
 JEMALLOC_INLINE bool
-bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t g;
 
@@ -42,8 +41,7 @@ bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 }
 
 JEMALLOC_INLINE void
-bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t *gp;
 	bitmap_t g;
@@ -69,8 +67,9 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 		assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)));
 		g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
 		*gp = g;
-		if (g != 0)
+		if (g != 0) {
 			break;
+		}
 	}
 }
 #endif
@@ -78,8 +77,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 
 /* sfu: set first unset. */
 JEMALLOC_INLINE size_t
-bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 	size_t bit;
 	bitmap_t g;
 	unsigned i;
@@ -109,8 +107,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
 }
 
 JEMALLOC_INLINE void
-bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
-{
+bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) {
 	size_t goff;
 	bitmap_t *gp;
 	bitmap_t g;
@@ -140,8 +137,9 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
 		    == 0);
 		g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK);
 		*gp = g;
-		if (!propagate)
+		if (!propagate) {
 			break;
+		}
 	}
 }
 #endif /* BITMAP_USE_TREE */
@@ -43,8 +43,7 @@ int extent_snad_comp(const extent_t *a, const extent_t *b);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_))
 JEMALLOC_INLINE extent_t *
-extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
-{
+extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent) {
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
 
@@ -53,132 +52,112 @@ extent_lookup(tsdn_t *tsdn, const void *ptr, bool dependent)
 }
 
 JEMALLOC_INLINE arena_t *
-extent_arena_get(const extent_t *extent)
-{
+extent_arena_get(const extent_t *extent) {
 	return (extent->e_arena);
 }
 
 JEMALLOC_INLINE void *
-extent_base_get(const extent_t *extent)
-{
+extent_base_get(const extent_t *extent) {
 	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
 	    !extent->e_slab);
 	return (PAGE_ADDR2BASE(extent->e_addr));
 }
 
 JEMALLOC_INLINE void *
-extent_addr_get(const extent_t *extent)
-{
+extent_addr_get(const extent_t *extent) {
 	assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
 	    !extent->e_slab);
 	return (extent->e_addr);
 }
 
 JEMALLOC_INLINE size_t
-extent_size_get(const extent_t *extent)
-{
+extent_size_get(const extent_t *extent) {
 	return (extent->e_size);
 }
 
 JEMALLOC_INLINE size_t
-extent_usize_get(const extent_t *extent)
-{
+extent_usize_get(const extent_t *extent) {
 	assert(!extent->e_slab);
 	return (extent->e_usize);
 }
 
 JEMALLOC_INLINE void *
-extent_before_get(const extent_t *extent)
-{
+extent_before_get(const extent_t *extent) {
 	return ((void *)((uintptr_t)extent_base_get(extent) - PAGE));
 }
 
 JEMALLOC_INLINE void *
-extent_last_get(const extent_t *extent)
-{
+extent_last_get(const extent_t *extent) {
 	return ((void *)((uintptr_t)extent_base_get(extent) +
 	    extent_size_get(extent) - PAGE));
 }
 
 JEMALLOC_INLINE void *
-extent_past_get(const extent_t *extent)
-{
+extent_past_get(const extent_t *extent) {
 	return ((void *)((uintptr_t)extent_base_get(extent) +
 	    extent_size_get(extent)));
 }
 
 JEMALLOC_INLINE size_t
-extent_sn_get(const extent_t *extent)
-{
+extent_sn_get(const extent_t *extent) {
 	return (extent->e_sn);
 }
 
 JEMALLOC_INLINE bool
-extent_active_get(const extent_t *extent)
-{
+extent_active_get(const extent_t *extent) {
 	return (extent->e_active);
 }
 
 JEMALLOC_INLINE bool
-extent_retained_get(const extent_t *extent)
-{
+extent_retained_get(const extent_t *extent) {
 	return (qr_next(extent, qr_link) == extent);
 }
 
 JEMALLOC_INLINE bool
-extent_zeroed_get(const extent_t *extent)
-{
+extent_zeroed_get(const extent_t *extent) {
 	return (extent->e_zeroed);
 }
 
 JEMALLOC_INLINE bool
-extent_committed_get(const extent_t *extent)
-{
+extent_committed_get(const extent_t *extent) {
 	return (extent->e_committed);
 }
 
 JEMALLOC_INLINE bool
-extent_slab_get(const extent_t *extent)
-{
+extent_slab_get(const extent_t *extent) {
 	return (extent->e_slab);
 }
 
 JEMALLOC_INLINE arena_slab_data_t *
-extent_slab_data_get(extent_t *extent)
-{
+extent_slab_data_get(extent_t *extent) {
 	assert(extent->e_slab);
 	return (&extent->e_slab_data);
 }
 
 JEMALLOC_INLINE const arena_slab_data_t *
-extent_slab_data_get_const(const extent_t *extent)
-{
+extent_slab_data_get_const(const extent_t *extent) {
 	assert(extent->e_slab);
 	return (&extent->e_slab_data);
 }
 
 JEMALLOC_INLINE prof_tctx_t *
-extent_prof_tctx_get(const extent_t *extent)
-{
+extent_prof_tctx_get(const extent_t *extent) {
 	return ((prof_tctx_t *)atomic_read_p(
 	    &((extent_t *)extent)->e_prof_tctx_pun));
 }
 
 JEMALLOC_INLINE void
-extent_arena_set(extent_t *extent, arena_t *arena)
-{
+extent_arena_set(extent_t *extent, arena_t *arena) {
 	extent->e_arena = arena;
 }
 
 JEMALLOC_INLINE void
-extent_addr_set(extent_t *extent, void *addr)
-{
+extent_addr_set(extent_t *extent, void *addr) {
 	extent->e_addr = addr;
 }
 
 JEMALLOC_INLINE void
-extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
-{
+extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
 	assert(extent_base_get(extent) == extent_addr_get(extent));
 
 	if (alignment < PAGE) {
@@ -197,58 +176,49 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
 }
 
 JEMALLOC_INLINE void
-extent_size_set(extent_t *extent, size_t size)
-{
+extent_size_set(extent_t *extent, size_t size) {
 	extent->e_size = size;
 }
 
 JEMALLOC_INLINE void
-extent_usize_set(extent_t *extent, size_t usize)
-{
+extent_usize_set(extent_t *extent, size_t usize) {
 	extent->e_usize = usize;
 }
 
 JEMALLOC_INLINE void
-extent_sn_set(extent_t *extent, size_t sn)
-{
+extent_sn_set(extent_t *extent, size_t sn) {
 	extent->e_sn = sn;
 }
 
 JEMALLOC_INLINE void
-extent_active_set(extent_t *extent, bool active)
-{
+extent_active_set(extent_t *extent, bool active) {
 	extent->e_active = active;
 }
 
 JEMALLOC_INLINE void
-extent_zeroed_set(extent_t *extent, bool zeroed)
-{
+extent_zeroed_set(extent_t *extent, bool zeroed) {
 	extent->e_zeroed = zeroed;
 }
 
 JEMALLOC_INLINE void
-extent_committed_set(extent_t *extent, bool committed)
-{
+extent_committed_set(extent_t *extent, bool committed) {
 	extent->e_committed = committed;
 }
 
 JEMALLOC_INLINE void
-extent_slab_set(extent_t *extent, bool slab)
-{
+extent_slab_set(extent_t *extent, bool slab) {
 	extent->e_slab = slab;
 }
 
 JEMALLOC_INLINE void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
-{
+extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
 	atomic_write_p(&extent->e_prof_tctx_pun, tctx);
 }
 
 JEMALLOC_INLINE void
 extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
     size_t usize, size_t sn, bool active, bool zeroed, bool committed,
-    bool slab)
-{
+    bool slab) {
 	assert(addr == PAGE_ADDR2BASE(addr) || !slab);
 
 	extent_arena_set(extent, arena);
@@ -260,26 +230,24 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
 	extent_zeroed_set(extent, zeroed);
 	extent_committed_set(extent, committed);
 	extent_slab_set(extent, slab);
-	if (config_prof)
+	if (config_prof) {
 		extent_prof_tctx_set(extent, NULL);
+	}
 	qr_new(extent, qr_link);
 }
 
 JEMALLOC_INLINE void
-extent_ring_insert(extent_t *sentinel, extent_t *extent)
-{
+extent_ring_insert(extent_t *sentinel, extent_t *extent) {
 	qr_meld(sentinel, extent, extent_t, qr_link);
 }
 
 JEMALLOC_INLINE void
-extent_ring_remove(extent_t *extent)
-{
+extent_ring_remove(extent_t *extent) {
 	qr_remove(extent, qr_link);
 }
 
 JEMALLOC_INLINE int
-extent_sn_comp(const extent_t *a, const extent_t *b)
-{
+extent_sn_comp(const extent_t *a, const extent_t *b) {
 	size_t a_sn = extent_sn_get(a);
 	size_t b_sn = extent_sn_get(b);
 
@@ -287,8 +255,7 @@ extent_sn_comp(const extent_t *a, const extent_t *b)
 }
 
 JEMALLOC_INLINE int
-extent_ad_comp(const extent_t *a, const extent_t *b)
-{
+extent_ad_comp(const extent_t *a, const extent_t *b) {
 	uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
 	uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
 
@@ -296,13 +263,13 @@ extent_ad_comp(const extent_t *a, const extent_t *b)
 }
 
 JEMALLOC_INLINE int
-extent_snad_comp(const extent_t *a, const extent_t *b)
-{
+extent_snad_comp(const extent_t *a, const extent_t *b) {
 	int ret;
 
 	ret = extent_sn_comp(a, b);
-	if (ret != 0)
+	if (ret != 0) {
 		return (ret);
+	}
 
 	ret = extent_ad_comp(a, b);
 	return (ret);
@@ -21,20 +21,17 @@ void hash(const void *key, size_t len, const uint32_t seed,
 /******************************************************************************/
 /* Internal implementation. */
 JEMALLOC_INLINE uint32_t
-hash_rotl_32(uint32_t x, int8_t r)
-{
+hash_rotl_32(uint32_t x, int8_t r) {
 	return ((x << r) | (x >> (32 - r)));
 }
 
 JEMALLOC_INLINE uint64_t
-hash_rotl_64(uint64_t x, int8_t r)
-{
+hash_rotl_64(uint64_t x, int8_t r) {
 	return ((x << r) | (x >> (64 - r)));
 }
 
 JEMALLOC_INLINE uint32_t
-hash_get_block_32(const uint32_t *p, int i)
-{
+hash_get_block_32(const uint32_t *p, int i) {
 	/* Handle unaligned read. */
 	if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
 		uint32_t ret;
@@ -47,8 +44,7 @@ hash_get_block_32(const uint32_t *p, int i)
 }
 
 JEMALLOC_INLINE uint64_t
-hash_get_block_64(const uint64_t *p, int i)
-{
+hash_get_block_64(const uint64_t *p, int i) {
 	/* Handle unaligned read. */
 	if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
 		uint64_t ret;
@@ -61,8 +57,7 @@ hash_get_block_64(const uint64_t *p, int i)
 }
 
 JEMALLOC_INLINE uint32_t
-hash_fmix_32(uint32_t h)
-{
+hash_fmix_32(uint32_t h) {
 	h ^= h >> 16;
 	h *= 0x85ebca6b;
 	h ^= h >> 13;
@@ -73,8 +68,7 @@ hash_fmix_32(uint32_t h)
 }
 
 JEMALLOC_INLINE uint64_t
-hash_fmix_64(uint64_t k)
-{
+hash_fmix_64(uint64_t k) {
 	k ^= k >> 33;
 	k *= KQU(0xff51afd7ed558ccd);
 	k ^= k >> 33;
@@ -85,8 +79,7 @@ hash_fmix_64(uint64_t k)
 }
 
 JEMALLOC_INLINE uint32_t
-hash_x86_32(const void *key, int len, uint32_t seed)
-{
+hash_x86_32(const void *key, int len, uint32_t seed) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 4;
 
@@ -137,8 +130,7 @@ hash_x86_32(const void *key, int len, uint32_t seed)
 
 UNUSED JEMALLOC_INLINE void
 hash_x86_128(const void *key, const int len, uint32_t seed,
-    uint64_t r_out[2])
-{
+    uint64_t r_out[2]) {
 	const uint8_t * data = (const uint8_t *) key;
 	const int nblocks = len / 16;
 
@@ -239,8 +231,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
 
 UNUSED JEMALLOC_INLINE void
 hash_x64_128(const void *key, const int len, const uint32_t seed,
-    uint64_t r_out[2])
-{
+    uint64_t r_out[2]) {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 16;
 
@@ -318,8 +309,7 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
 /******************************************************************************/
 /* API. */
 JEMALLOC_INLINE void
-hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2])
-{
+hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) {
 	assert(len <= INT_MAX); /* Unfortunate implementation limitation. */
 
 #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN))
@@ -550,10 +550,10 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind(size_t psz)
-{
-	if (unlikely(psz > LARGE_MAXCLASS))
+psz2ind(size_t psz) {
+	if (unlikely(psz > LARGE_MAXCLASS)) {
 		return (NPSIZES);
+	}
 	{
 		pszind_t x = lg_floor((psz<<1)-1);
 		pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
@@ -573,10 +573,10 @@ psz2ind(size_t psz)
 }
 
 JEMALLOC_INLINE size_t
-pind2sz_compute(pszind_t pind)
-{
-	if (unlikely(pind == NPSIZES))
+pind2sz_compute(pszind_t pind) {
+	if (unlikely(pind == NPSIZES)) {
 		return (LARGE_MAXCLASS + PAGE);
+	}
 	{
 		size_t grp = pind >> LG_SIZE_CLASS_GROUP;
 		size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
@@ -595,25 +595,23 @@ pind2sz_compute(pszind_t pind)
 }
 
 JEMALLOC_INLINE size_t
-pind2sz_lookup(pszind_t pind)
-{
+pind2sz_lookup(pszind_t pind) {
 	size_t ret = (size_t)pind2sz_tab[pind];
 	assert(ret == pind2sz_compute(pind));
 	return (ret);
 }
 
 JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
-{
+pind2sz(pszind_t pind) {
 	assert(pind < NPSIZES+1);
 	return (pind2sz_lookup(pind));
 }
 
 JEMALLOC_INLINE size_t
-psz2u(size_t psz)
-{
-	if (unlikely(psz > LARGE_MAXCLASS))
+psz2u(size_t psz) {
+	if (unlikely(psz > LARGE_MAXCLASS)) {
 		return (LARGE_MAXCLASS + PAGE);
+	}
 	{
 		size_t x = lg_floor((psz<<1)-1);
 		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
@@ -626,10 +624,10 @@ psz2u(size_t psz)
 }
 
 JEMALLOC_INLINE szind_t
-size2index_compute(size_t size)
-{
-	if (unlikely(size > LARGE_MAXCLASS))
+size2index_compute(size_t size) {
+	if (unlikely(size > LARGE_MAXCLASS)) {
 		return (NSIZES);
+	}
 #if (NTBINS != 0)
 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
 		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
@@ -656,8 +654,7 @@ size2index_compute(size_t size)
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-size2index_lookup(size_t size)
-{
+size2index_lookup(size_t size) {
 	assert(size <= LOOKUP_MAXCLASS);
 	{
 		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
@@ -667,20 +664,20 @@ size2index_lookup(size_t size)
 }
 
 JEMALLOC_ALWAYS_INLINE szind_t
-size2index(size_t size)
-{
+size2index(size_t size) {
 	assert(size > 0);
-	if (likely(size <= LOOKUP_MAXCLASS))
+	if (likely(size <= LOOKUP_MAXCLASS)) {
 		return (size2index_lookup(size));
+	}
 	return (size2index_compute(size));
 }
 
 JEMALLOC_INLINE size_t
-index2size_compute(szind_t index)
-{
+index2size_compute(szind_t index) {
 #if (NTBINS > 0)
-	if (index < NTBINS)
+	if (index < NTBINS) {
 		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
+	}
 #endif
 	{
 		size_t reduced_index = index - NTBINS;
@@ -702,25 +699,23 @@ index2size_compute(szind_t index)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(szind_t index)
-{
+index2size_lookup(szind_t index) {
 	size_t ret = (size_t)index2size_tab[index];
 	assert(ret == index2size_compute(index));
 	return (ret);
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-index2size(szind_t index)
-{
+index2size(szind_t index) {
 	assert(index < NSIZES);
 	return (index2size_lookup(index));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-s2u_compute(size_t size)
-{
-	if (unlikely(size > LARGE_MAXCLASS))
+s2u_compute(size_t size) {
+	if (unlikely(size > LARGE_MAXCLASS)) {
 		return (0);
+	}
 #if (NTBINS > 0)
 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
 		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
@@ -741,8 +736,7 @@ s2u_compute(size_t size)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-s2u_lookup(size_t size)
-{
+s2u_lookup(size_t size) {
 	size_t ret = index2size_lookup(size2index_lookup(size));
 
 	assert(ret == s2u_compute(size));
@@ -754,11 +748,11 @@ s2u_lookup(size_t size)
  * specified size.
 */
 JEMALLOC_ALWAYS_INLINE size_t
-s2u(size_t size)
-{
+s2u(size_t size) {
 	assert(size > 0);
-	if (likely(size <= LOOKUP_MAXCLASS))
+	if (likely(size <= LOOKUP_MAXCLASS)) {
 		return (s2u_lookup(size));
+	}
 	return (s2u_compute(size));
 }
 
@@ -767,8 +761,7 @@ s2u(size_t size)
 * specified size and alignment.
 */
 JEMALLOC_ALWAYS_INLINE size_t
-sa2u(size_t size, size_t alignment)
-{
+sa2u(size_t size, size_t alignment) {
 	size_t usize;
 
 	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
@@ -790,19 +783,21 @@ sa2u(size_t size, size_t alignment)
 		 * 192 | 11000000 | 64
 		 */
 		usize = s2u(ALIGNMENT_CEILING(size, alignment));
-		if (usize < LARGE_MINCLASS)
+		if (usize < LARGE_MINCLASS) {
 			return (usize);
+		}
 	}
 
 	/* Large size class. Beware of overflow. */
 
-	if (unlikely(alignment > LARGE_MAXCLASS))
+	if (unlikely(alignment > LARGE_MAXCLASS)) {
 		return (0);
+	}
 
 	/* Make sure result is a large size class. */
-	if (size <= LARGE_MINCLASS)
+	if (size <= LARGE_MINCLASS) {
 		usize = LARGE_MINCLASS;
-	else {
+	} else {
 		usize = s2u(size);
 		if (usize < size) {
 			/* size_t overflow. */
@@ -823,35 +818,33 @@ sa2u(size_t size, size_t alignment)
 
 /* Choose an arena based on a per-thread value. */
 JEMALLOC_INLINE arena_t *
-arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
-{
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
 	arena_t *ret;
 
-	if (arena != NULL)
+	if (arena != NULL) {
 		return (arena);
+	}
 
 	ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
-	if (unlikely(ret == NULL))
+	if (unlikely(ret == NULL)) {
 		ret = arena_choose_hard(tsd, internal);
+	}
 
 	return (ret);
 }
 
 JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
-{
+arena_choose(tsd_t *tsd, arena_t *arena) {
 	return (arena_choose_impl(tsd, arena, false));
 }
 
 JEMALLOC_INLINE arena_t *
-arena_ichoose(tsd_t *tsd, arena_t *arena)
-{
+arena_ichoose(tsd_t *tsd, arena_t *arena) {
 	return (arena_choose_impl(tsd, arena, true));
 }
 
 JEMALLOC_INLINE arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
-{
+arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
 	arena_tdata_t *tdata;
 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
 
@@ -869,14 +862,14 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
 	}
 
 	tdata = &arenas_tdata[ind];
-	if (likely(tdata != NULL) || !refresh_if_missing)
+	if (likely(tdata != NULL) || !refresh_if_missing) {
 		return (tdata);
+	}
 	return (arena_tdata_get_hard(tsd, ind));
 }
 
 JEMALLOC_INLINE arena_t *
-arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
-{
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
 	arena_t *ret;
 
 	assert(ind <= MALLOCX_ARENA_MAX);
@@ -893,13 +886,13 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
 }
 
 JEMALLOC_INLINE ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind)
-{
+decay_ticker_get(tsd_t *tsd, unsigned ind) {
 	arena_tdata_t *tdata;
 
 	tdata = arena_tdata_get(tsd, ind, true);
-	if (unlikely(tdata == NULL))
+	if (unlikely(tdata == NULL)) {
 		return (NULL);
+	}
 	return (&tdata->decay_ticker);
 }
 #endif
@@ -917,8 +910,7 @@ extent_t *iealloc(tsdn_t *tsdn, const void *ptr);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(tsdn_t *tsdn, const void *ptr)
-{
+iealloc(tsdn_t *tsdn, const void *ptr) {
 	return (extent_lookup(tsdn, ptr, true));
 }
 #endif
@@ -958,8 +950,7 @@ bool ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_ALWAYS_INLINE arena_t *
-iaalloc(tsdn_t *tsdn, const void *ptr)
-{
+iaalloc(tsdn_t *tsdn, const void *ptr) {
 	assert(ptr != NULL);
 
 	return (arena_aalloc(tsdn, ptr));
@@ -973,8 +964,7 @@ iaalloc(tsdn_t *tsdn, const void *ptr)
 *   size_t sz = isalloc(tsdn, extent, ptr);
 */
 JEMALLOC_ALWAYS_INLINE size_t
-isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
-{
+isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
 	assert(ptr != NULL);
 
 	return (arena_salloc(tsdn, extent, ptr));
@@ -982,8 +972,7 @@ isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
 
 JEMALLOC_ALWAYS_INLINE void *
 iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
-    bool is_internal, arena_t *arena, bool slow_path)
-{
+    bool is_internal, arena_t *arena, bool slow_path) {
 	void *ret;
 
 	assert(size != 0);
@@ -1000,16 +989,14 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
-{
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
 	return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
 	    false, NULL, slow_path));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
 ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena)
-{
+    tcache_t *tcache, bool is_internal, arena_t *arena) {
 	void *ret;
 
 	assert(usize != 0);
@@ -1029,21 +1016,18 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
 
 JEMALLOC_ALWAYS_INLINE void *
 ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, arena_t *arena)
-{
+    tcache_t *tcache, arena_t *arena) {
 	return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
-{
+ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
 	return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
 	    tcache_get(tsd, true), false, NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(tsdn_t *tsdn, const void *ptr)
-{
+ivsalloc(tsdn_t *tsdn, const void *ptr) {
 	extent_t *extent;
 
 	/*
@@ -1055,8 +1039,9 @@ ivsalloc(tsdn_t *tsdn, const void *ptr)
 	 * failure.
 	 * */
 	extent = extent_lookup(tsdn, ptr, false);
-	if (extent == NULL)
+	if (extent == NULL) {
 		return (0);
+	}
 	assert(extent_active_get(extent));
 	/* Only slab members should be looked up via interior pointers. */
 	assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
@@ -1066,8 +1051,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr)
 
 JEMALLOC_ALWAYS_INLINE void
 idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
-    bool is_internal, bool slow_path)
-{
+    bool is_internal, bool slow_path) {
 	assert(ptr != NULL);
 	assert(!is_internal || tcache == NULL);
 	assert(!is_internal || arena_ind_get(iaalloc(tsdn, ptr)) <
@@ -1081,41 +1065,42 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
 }
 
 JEMALLOC_ALWAYS_INLINE void
-idalloc(tsd_t *tsd, extent_t *extent, void *ptr)
-{
+idalloc(tsd_t *tsd, extent_t *extent, void *ptr) {
 	idalloctm(tsd_tsdn(tsd), extent, ptr, tcache_get(tsd, false), false,
 	    true);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 isdalloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
-    tcache_t *tcache, bool slow_path)
-{
+    tcache_t *tcache, bool slow_path) {
 	arena_sdalloc(tsdn, extent, ptr, size, tcache, slow_path);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
 iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
     size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache,
-    arena_t *arena)
-{
+    arena_t *arena) {
	void *p;
 	size_t usize, copysize;
 
 	usize = sa2u(size + extra, alignment);
-	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 		return (NULL);
+	}
 	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
 	if (p == NULL) {
-		if (extra == 0)
+		if (extra == 0) {
 			return (NULL);
+		}
 		/* Try again, without extra this time. */
 		usize = sa2u(size, alignment);
-		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS))
+		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
 			return (NULL);
+		}
 		p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
-		if (p == NULL)
+		if (p == NULL) {
 			return (NULL);
+		}
 	}
 	/*
 	 * Copy at most size bytes (not size+extra), since the caller has no
@@ -1129,8 +1114,7 @@ iralloct_realign(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
 
 JEMALLOC_ALWAYS_INLINE void *
 iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
-{
+    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena) {
 	assert(ptr != NULL);
 	assert(size != 0);
 
@@ -1150,16 +1134,14 @@ iralloct(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
 
 JEMALLOC_ALWAYS_INLINE void *
 iralloc(tsd_t *tsd, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero)
-{
+    size_t alignment, bool zero) {
 	return (iralloct(tsd_tsdn(tsd), extent, ptr, oldsize, size, alignment,
 	    zero, tcache_get(tsd, true), NULL));
 }
 
 JEMALLOC_ALWAYS_INLINE bool
 ixalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero)
-{
+    size_t extra, size_t alignment, bool zero) {
 	assert(ptr != NULL);
 	assert(size != 0);
 
@@ -61,8 +61,7 @@ typedef intptr_t ssize_t;
 # pragma warning(disable: 4996)
 #if _MSC_VER < 1800
 static int
-isblank(int c)
-{
+isblank(int c) {
 	return (c == '\t' || c == ' ');
 }
 #endif
@@ -10,8 +10,7 @@ void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
 JEMALLOC_INLINE void
-malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 	if (isthreaded) {
 		witness_assert_not_owner(tsdn, &mutex->witness);
 #ifdef _WIN32
@@ -32,8 +31,7 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
 }
 
 JEMALLOC_INLINE void
-malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) {
 	if (isthreaded) {
 		witness_unlock(tsdn, &mutex->witness);
 #ifdef _WIN32
@@ -53,17 +51,17 @@ malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
 }
 
 JEMALLOC_INLINE void
-malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	if (isthreaded)
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	if (isthreaded) {
 		witness_assert_owner(tsdn, &mutex->witness);
+	}
 }
 
 JEMALLOC_INLINE void
-malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
-{
-	if (isthreaded)
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+	if (isthreaded) {
 		witness_assert_not_owner(tsdn, &mutex->witness);
+	}
 }
 #endif
@@ -58,17 +58,18 @@ struct { \
	phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
	phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
	phn_next_set(a_type, a_field, a_phn1, phn0child); \
	if (phn0child != NULL) \
	if (phn0child != NULL) { \
		phn_prev_set(a_type, a_field, phn0child, a_phn1); \
	} \
	phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
} while (0)

#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
	if (a_phn0 == NULL) \
	if (a_phn0 == NULL) { \
		r_phn = a_phn1; \
	else if (a_phn1 == NULL) \
	} else if (a_phn1 == NULL) { \
		r_phn = a_phn0; \
	else if (a_cmp(a_phn0, a_phn1) < 0) { \
	} else if (a_cmp(a_phn0, a_phn1) < 0) { \
		phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
		    a_cmp); \
		r_phn = a_phn0; \
@@ -95,8 +96,9 @@ struct { \
	 */ \
	if (phn1 != NULL) { \
		a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
		if (phnrest != NULL) \
		if (phnrest != NULL) { \
			phn_prev_set(a_type, a_field, phnrest, NULL); \
		} \
		phn_prev_set(a_type, a_field, phn0, NULL); \
		phn_next_set(a_type, a_field, phn0, NULL); \
		phn_prev_set(a_type, a_field, phn1, NULL); \
@@ -150,8 +152,9 @@ struct { \
			    NULL); \
			phn_merge(a_type, a_field, phn0, phn1, \
			    a_cmp, phn0); \
			if (head == NULL) \
			if (head == NULL) { \
				break; \
			} \
			phn_next_set(a_type, a_field, tail, \
			    phn0); \
			tail = phn0; \
@@ -179,9 +182,9 @@ struct { \

#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
	a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
	if (lchild == NULL) \
	if (lchild == NULL) { \
		r_phn = NULL; \
	else { \
	} else { \
		ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
		    r_phn); \
	} \
@@ -205,26 +208,23 @@ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
 */
#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
a_attr void \
a_prefix##new(a_ph_type *ph) \
{ \
a_prefix##new(a_ph_type *ph) { \
	memset(ph, 0, sizeof(ph(a_type))); \
} \
a_attr bool \
a_prefix##empty(a_ph_type *ph) \
{ \
a_prefix##empty(a_ph_type *ph) { \
	return (ph->ph_root == NULL); \
} \
a_attr a_type * \
a_prefix##first(a_ph_type *ph) \
{ \
	if (ph->ph_root == NULL) \
a_prefix##first(a_ph_type *ph) { \
	if (ph->ph_root == NULL) { \
		return (NULL); \
	} \
	ph_merge_aux(a_type, a_field, ph, a_cmp); \
	return (ph->ph_root); \
} \
a_attr void \
a_prefix##insert(a_ph_type *ph, a_type *phn) \
{ \
a_prefix##insert(a_ph_type *ph, a_type *phn) { \
	memset(&phn->a_field, 0, sizeof(phn(a_type))); \
 \
	/* \
@@ -235,9 +235,9 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
	 * constant-time, whereas eager merging would make insert \
	 * O(log n). \
	 */ \
	if (ph->ph_root == NULL) \
	if (ph->ph_root == NULL) { \
		ph->ph_root = phn; \
	else { \
	} else { \
		phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
		    a_field, ph->ph_root)); \
		if (phn_next_get(a_type, a_field, ph->ph_root) != \
@@ -251,12 +251,12 @@ a_prefix##insert(a_ph_type *ph, a_type *phn) \
	} \
} \
a_attr a_type * \
a_prefix##remove_first(a_ph_type *ph) \
{ \
a_prefix##remove_first(a_ph_type *ph) { \
	a_type *ret; \
 \
	if (ph->ph_root == NULL) \
	if (ph->ph_root == NULL) { \
		return (NULL); \
	} \
	ph_merge_aux(a_type, a_field, ph, a_cmp); \
 \
	ret = ph->ph_root; \
@@ -267,8 +267,7 @@ a_prefix##remove_first(a_ph_type *ph) \
	return (ret); \
} \
a_attr void \
a_prefix##remove(a_ph_type *ph, a_type *phn) \
{ \
a_prefix##remove(a_ph_type *ph, a_type *phn) { \
	a_type *replace, *parent; \
 \
	/* \
@@ -286,8 +285,9 @@ a_prefix##remove(a_ph_type *ph, a_type *phn) \
 \
	/* Get parent (if phn is leftmost child) before mutating. */ \
	if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
		if (phn_lchild_get(a_type, a_field, parent) != phn) \
		if (phn_lchild_get(a_type, a_field, parent) != phn) { \
			parent = NULL; \
		} \
	} \
	/* Find a possible replacement node, and link to parent. */ \
	ph_merge_children(a_type, a_field, phn, a_cmp, replace); \

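For reference, a minimal usage sketch of the ph_gen() pairing-heap generator restyled above. The phn()/ph() macros, the ph_gen() argument order, and the generated heap_* names come from this header; node_t, node_cmp, the key field, and the comparator contract (<0/0/>0) are illustrative assumptions, not part of this commit:

#include <stdint.h>

typedef struct node_s node_t;
struct node_s {
	phn(node_t) link;	/* Heap linkage consumed by ph_gen(). */
	uint64_t key;		/* Hypothetical payload. */
};

static int
node_cmp(const node_t *a, const node_t *b) {
	return (a->key < b->key) ? -1 : (a->key > b->key);
}

typedef ph(node_t) heap_t;
ph_gen(static, heap_, heap_t, node_t, link, node_cmp)

/* Usage: heap_new(&h); heap_insert(&h, &n); min = heap_remove_first(&h); */
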
@@ -18,20 +18,17 @@ size_t prng_range_zu(size_t *state, size_t range, bool atomic);

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
JEMALLOC_ALWAYS_INLINE uint32_t
prng_state_next_u32(uint32_t state)
{
prng_state_next_u32(uint32_t state) {
	return ((state * PRNG_A_32) + PRNG_C_32);
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_state_next_u64(uint64_t state)
{
prng_state_next_u64(uint64_t state) {
	return ((state * PRNG_A_64) + PRNG_C_64);
}

JEMALLOC_ALWAYS_INLINE size_t
prng_state_next_zu(size_t state)
{
prng_state_next_zu(size_t state) {
#if LG_SIZEOF_PTR == 2
	return ((state * PRNG_A_32) + PRNG_C_32);
#elif LG_SIZEOF_PTR == 3
@@ -42,8 +39,7 @@ prng_state_next_zu(size_t state)
}

JEMALLOC_ALWAYS_INLINE uint32_t
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)
{
prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) {
	uint32_t ret, state1;

	assert(lg_range > 0);
@@ -67,8 +63,7 @@ prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic)

/* 64-bit atomic operations cannot be supported on all relevant platforms. */
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range)
{
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
	uint64_t ret, state1;

	assert(lg_range > 0);
@@ -82,8 +77,7 @@ prng_lg_range_u64(uint64_t *state, unsigned lg_range)
}

JEMALLOC_ALWAYS_INLINE size_t
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
{
prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) {
	size_t ret, state1;

	assert(lg_range > 0);
@@ -106,8 +100,7 @@ prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic)
}

JEMALLOC_ALWAYS_INLINE uint32_t
prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
{
prng_range_u32(uint32_t *state, uint32_t range, bool atomic) {
	uint32_t ret;
	unsigned lg_range;

@@ -125,8 +118,7 @@ prng_range_u32(uint32_t *state, uint32_t range, bool atomic)
}

JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range)
{
prng_range_u64(uint64_t *state, uint64_t range) {
	uint64_t ret;
	unsigned lg_range;

@@ -144,8 +136,7 @@ prng_range_u64(uint64_t *state, uint64_t range)
}

JEMALLOC_ALWAYS_INLINE size_t
prng_range_zu(size_t *state, size_t range, bool atomic)
{
prng_range_zu(size_t *state, size_t range, bool atomic) {
	size_t ret;
	unsigned lg_range;

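For reference, a standalone sketch of the linear-congruential update and high-bit extraction used by the prng inlines above. The constants here are Knuth's MMIX parameters, chosen purely for illustration; they are not necessarily the PRNG_A_64/PRNG_C_64 values jemalloc defines:

#include <stdint.h>

#define DEMO_PRNG_A_64 UINT64_C(6364136223846793005)
#define DEMO_PRNG_C_64 UINT64_C(1442695040888963407)

/* Mirrors prng_state_next_u64(): one LCG step. */
static uint64_t
demo_prng_state_next_u64(uint64_t state) {
	return (state * DEMO_PRNG_A_64) + DEMO_PRNG_C_64;
}

/* Mirrors prng_lg_range_u64(): advance, then keep the top lg_range
 * bits, which are the statistically strongest bits of an LCG. */
static uint64_t
demo_prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
	uint64_t state1 = demo_prng_state_next_u64(*state);
	*state = state1;
	return state1 >> (64 - lg_range);
}
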
@@ -27,8 +27,7 @@ void prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr,

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void)
{
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
@@ -39,8 +38,7 @@ prof_active_get_unlocked(void)
}

JEMALLOC_ALWAYS_INLINE bool
prof_gdump_get_unlocked(void)
{
prof_gdump_get_unlocked(void) {
	/*
	 * No locking is used when reading prof_gdump_val in the fast path, so
	 * there are no guarantees regarding how long it will take for all
@@ -50,8 +48,7 @@ prof_gdump_get_unlocked(void)
}

JEMALLOC_ALWAYS_INLINE prof_tdata_t *
prof_tdata_get(tsd_t *tsd, bool create)
{
prof_tdata_get(tsd_t *tsd, bool create) {
	prof_tdata_t *tdata;

	cassert(config_prof);
@@ -74,8 +71,7 @@ prof_tdata_get(tsd_t *tsd, bool create)
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

@@ -84,8 +80,7 @@ prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)

JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
    prof_tctx_t *tctx)
{
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

@@ -94,8 +89,7 @@ prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,

JEMALLOC_ALWAYS_INLINE void
prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
    prof_tctx_t *tctx)
{
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);

@@ -104,37 +98,40 @@ prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,

JEMALLOC_ALWAYS_INLINE bool
prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
    prof_tdata_t **tdata_out)
{
    prof_tdata_t **tdata_out) {
	prof_tdata_t *tdata;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX))
	if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
		tdata = NULL;
	}

	if (tdata_out != NULL)
	if (tdata_out != NULL) {
		*tdata_out = tdata;
	}

	if (unlikely(tdata == NULL))
	if (unlikely(tdata == NULL)) {
		return (true);
	}

	if (likely(tdata->bytes_until_sample >= usize)) {
		if (update)
		if (update) {
			tdata->bytes_until_sample -= usize;
		}
		return (true);
	} else {
		/* Compute new sample threshold. */
		if (update)
		if (update) {
			prof_sample_threshold_update(tdata);
		}
		return (!tdata->active);
	}
}

JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
{
prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
	prof_tctx_t *ret;
	prof_tdata_t *tdata;
	prof_bt_t bt;
@@ -142,9 +139,9 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
	assert(usize == s2u(usize));

	if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
	    &tdata)))
	    &tdata))) {
		ret = (prof_tctx_t *)(uintptr_t)1U;
	else {
	} else {
		bt_init(&bt, tdata->vec);
		prof_backtrace(&bt);
		ret = prof_lookup(tsd, &bt);
@@ -155,15 +152,14 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)

JEMALLOC_ALWAYS_INLINE void
prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
    prof_tctx_t *tctx)
{
    prof_tctx_t *tctx) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(usize == isalloc(tsdn, extent, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
	else {
	} else {
		prof_tctx_set(tsdn, extent, ptr, usize,
		    (prof_tctx_t *)(uintptr_t)1U);
	}
@@ -172,8 +168,7 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
JEMALLOC_ALWAYS_INLINE void
prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
    prof_tctx_t *tctx, bool prof_active, bool updated, extent_t *old_extent,
    const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx)
{
    const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) {
	bool sampled, old_sampled, moved;

	cassert(config_prof);
@@ -230,15 +225,15 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
}

JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize)
{
prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize) {
	prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);

	cassert(config_prof);
	assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));

	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
	if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
		prof_free_sampled_object(tsd, usize, tctx);
	}
}
#endif

@@ -25,14 +25,12 @@ struct { \
	(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)

#define qr_after_insert(a_qrelm, a_qr, a_field) \
	do \
	{ \
#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
	(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
	(a_qr)->a_field.qre_prev = (a_qrelm); \
	(a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
	(a_qrelm)->a_field.qre_next = (a_qr); \
	} while (0)
} while (0)

#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
	a_type *t; \

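For reference, a self-contained sketch of the circular doubly-linked ring splice that qr_after_insert() performs, with the macro expanded into plain functions over a hypothetical ring_t:

typedef struct ring_s {
	struct ring_s *next;	/* Plays the role of qre_next. */
	struct ring_s *prev;	/* Plays the role of qre_prev. */
} ring_t;

/* A fresh node is a singleton ring that points at itself. */
static void
ring_new(ring_t *r) {
	r->next = r;
	r->prev = r;
}

/* Splice r into the ring immediately after elm, exactly the four
 * pointer assignments in the macro body above. */
static void
ring_after_insert(ring_t *elm, ring_t *r) {
	r->next = elm->next;
	r->prev = elm;
	r->next->prev = r;
	elm->next = r;
}
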
@@ -550,8 +550,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
			/* Find node's successor, in preparation for swap. */ \
			pathp->cmp = 1; \
			nodep = pathp; \
			for (pathp++; pathp->node != NULL; \
			    pathp++) { \
			for (pathp++; pathp->node != NULL; pathp++) { \
				pathp->cmp = -1; \
				pathp[1].node = rbtn_left_get(a_type, a_field, \
				    pathp->node); \

@@ -37,12 +37,12 @@ void rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(const rtree_t *rtree, uintptr_t key)
{
rtree_start_level(const rtree_t *rtree, uintptr_t key) {
	unsigned start_level;

	if (unlikely(key == 0))
	if (unlikely(key == 0)) {
		return (rtree->height - 1);
	}

	start_level = rtree->start_level[(lg_floor(key) + 1) >>
	    LG_RTREE_BITS_PER_LEVEL];
@@ -52,8 +52,7 @@ rtree_start_level(const rtree_t *rtree, uintptr_t key)

JEMALLOC_ALWAYS_INLINE unsigned
rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
    uintptr_t key)
{
    uintptr_t key) {
	unsigned start_level;
	uintptr_t key_diff;

@@ -72,48 +71,45 @@ rtree_ctx_start_level(const rtree_t *rtree, const rtree_ctx_t *rtree_ctx,
}

JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) {
	return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
	    rtree->levels[level].cumbits)) & ((ZU(1) <<
	    rtree->levels[level].bits) - 1));
}

JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_elm_t *node)
{
rtree_node_valid(rtree_elm_t *node) {
	return ((uintptr_t)node != (uintptr_t)0);
}

JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_child_tryread(rtree_elm_t *elm, bool dependent)
{
rtree_child_tryread(rtree_elm_t *elm, bool dependent) {
	rtree_elm_t *child;

	/* Double-checked read (first read may be stale). */
	child = elm->child;
	if (!dependent && !rtree_node_valid(child))
	if (!dependent && !rtree_node_valid(child)) {
		child = (rtree_elm_t *)atomic_read_p(&elm->pun);
	}
	assert(!dependent || child != NULL);
	return (child);
}

JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_child_read(tsdn_t *tsdn, rtree_t *rtree, rtree_elm_t *elm, unsigned level,
    bool dependent)
{
    bool dependent) {
	rtree_elm_t *child;

	child = rtree_child_tryread(elm, dependent);
	if (!dependent && unlikely(!rtree_node_valid(child)))
	if (!dependent && unlikely(!rtree_node_valid(child))) {
		child = rtree_child_read_hard(tsdn, rtree, elm, level);
	}
	assert(!dependent || child != NULL);
	return (child);
}

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_elm_read(rtree_elm_t *elm, bool dependent)
{
rtree_elm_read(rtree_elm_t *elm, bool dependent) {
	extent_t *extent;

	if (dependent) {
@@ -140,14 +136,12 @@ rtree_elm_read(rtree_elm_t *elm, bool dependent)
}

JEMALLOC_INLINE void
rtree_elm_write(rtree_elm_t *elm, const extent_t *extent)
{
rtree_elm_write(rtree_elm_t *elm, const extent_t *extent) {
	atomic_write_p(&elm->pun, extent);
}

JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) {
	rtree_elm_t *subtree;

	/* Double-checked read (first read may be stale). */
@@ -161,21 +155,21 @@ rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
}

JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level, bool dependent)
{
rtree_subtree_read(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
    bool dependent) {
	rtree_elm_t *subtree;

	subtree = rtree_subtree_tryread(rtree, level, dependent);
	if (!dependent && unlikely(!rtree_node_valid(subtree)))
	if (!dependent && unlikely(!rtree_node_valid(subtree))) {
		subtree = rtree_subtree_read_hard(tsdn, rtree, level);
	}
	assert(!dependent || subtree != NULL);
	return (subtree);
}

JEMALLOC_ALWAYS_INLINE rtree_elm_t *
rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing)
{
    uintptr_t key, bool dependent, bool init_missing) {
	uintptr_t subkey;
	unsigned start_level;
	rtree_elm_t *node;
@@ -184,9 +178,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,

	if (dependent || init_missing) {
		if (likely(rtree_ctx->valid)) {
			if (key == rtree_ctx->key)
			if (key == rtree_ctx->key) {
				return (rtree_ctx->elms[rtree->height]);
			else {
			} else {
				unsigned no_ctx_start_level =
				    rtree_start_level(rtree, key);
				unsigned ctx_start_level;
@@ -237,8 +231,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
	case level: \
		assert(level < (RTREE_HEIGHT_MAX-1)); \
		if (!dependent && unlikely(!rtree_node_valid(node))) { \
			if (init_missing) \
			if (init_missing) { \
				rtree_ctx->valid = false; \
			} \
			return (NULL); \
		} \
		subkey = rtree_subkey(rtree, key, level - \
@@ -255,8 +250,9 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
	case level: \
		assert(level == (RTREE_HEIGHT_MAX-1)); \
		if (!dependent && unlikely(!rtree_node_valid(node))) { \
			if (init_missing) \
			if (init_missing) { \
				rtree_ctx->valid = false; \
			} \
			return (NULL); \
		} \
		subkey = rtree_subkey(rtree, key, level - \
@@ -330,16 +326,16 @@ rtree_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,

JEMALLOC_INLINE bool
rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    const extent_t *extent)
{
    const extent_t *extent) {
	rtree_elm_t *elm;

	assert(extent != NULL); /* Use rtree_clear() for this case. */
	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);

	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, false, true);
	if (elm == NULL)
	if (elm == NULL) {
		return (true);
	}
	assert(rtree_elm_read(elm, false) == NULL);
	rtree_elm_write(elm, extent);

@@ -348,27 +344,27 @@ rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,

JEMALLOC_ALWAYS_INLINE extent_t *
rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
    bool dependent)
{
    bool dependent) {
	rtree_elm_t *elm;

	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent, false);
	if (elm == NULL)
	if (elm == NULL) {
		return (NULL);
	}

	return (rtree_elm_read(elm, dependent));
}

JEMALLOC_INLINE rtree_elm_t *
rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key, bool dependent, bool init_missing)
{
    uintptr_t key, bool dependent, bool init_missing) {
	rtree_elm_t *elm;

	elm = rtree_elm_lookup(tsdn, rtree, rtree_ctx, key, dependent,
	    init_missing);
	if (!dependent && elm == NULL)
	if (!dependent && elm == NULL) {
		return (NULL);
	}
	{
		extent_t *extent;
		void *s;
@@ -380,52 +376,53 @@ rtree_elm_acquire(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
		} while (atomic_cas_p(&elm->pun, (void *)extent, s));
	}

	if (config_debug)
	if (config_debug) {
		rtree_elm_witness_acquire(tsdn, rtree, key, elm);
	}

	return (elm);
}

JEMALLOC_INLINE extent_t *
rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
{
rtree_elm_read_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
	extent_t *extent;

	assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);
	extent = (extent_t *)((uintptr_t)elm->pun & ~((uintptr_t)0x1));
	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);

	if (config_debug)
	if (config_debug) {
		rtree_elm_witness_access(tsdn, rtree, elm);
	}

	return (extent);
}

JEMALLOC_INLINE void
rtree_elm_write_acquired(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm,
    const extent_t *extent)
{
    const extent_t *extent) {
	assert(((uintptr_t)extent & (uintptr_t)0x1) == (uintptr_t)0x0);
	assert(((uintptr_t)elm->pun & (uintptr_t)0x1) == (uintptr_t)0x1);

	if (config_debug)
	if (config_debug) {
		rtree_elm_witness_access(tsdn, rtree, elm);
	}

	elm->pun = (void *)((uintptr_t)extent | (uintptr_t)0x1);
	assert(rtree_elm_read_acquired(tsdn, rtree, elm) == extent);
}

JEMALLOC_INLINE void
rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm)
{
rtree_elm_release(tsdn_t *tsdn, const rtree_t *rtree, rtree_elm_t *elm) {
	rtree_elm_write(elm, rtree_elm_read_acquired(tsdn, rtree, elm));
	if (config_debug)
	if (config_debug) {
		rtree_elm_witness_release(tsdn, rtree, elm);
	}
}

JEMALLOC_INLINE void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key)
{
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
    uintptr_t key) {
	rtree_elm_t *elm;

	elm = rtree_elm_acquire(tsdn, rtree, rtree_ctx, key, true, false);

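For reference, a standalone C11 sketch of the double-checked read pattern that rtree_child_tryread()/rtree_child_read() implement above: a cheap read first, a stronger atomic re-read only if the slot looks empty, and a slow path that publishes via compare-and-swap. All names are hypothetical, and error handling is elided:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct node_s {
	_Atomic(struct node_s *) child;
} node_t;

/* Slow path: allocate and publish; on a lost race, adopt the winner. */
static node_t *
node_child_read_hard(node_t *elm) {
	node_t *fresh = calloc(1, sizeof(*fresh));
	node_t *expected = NULL;
	if (!atomic_compare_exchange_strong_explicit(&elm->child,
	    &expected, fresh, memory_order_release, memory_order_acquire)) {
		free(fresh);	/* Another thread initialized it first. */
		return expected;
	}
	return fresh;
}

static node_t *
node_child_read(node_t *elm) {
	/* Double-checked read (first read may be stale). */
	node_t *child = atomic_load_explicit(&elm->child,
	    memory_order_relaxed);
	if (child == NULL) {
		child = atomic_load_explicit(&elm->child,
		    memory_order_acquire);
	}
	if (child == NULL) {
		child = node_child_read_hard(elm);
	}
	return child;
}
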
@@ -8,21 +8,21 @@ void spin_adaptive(spin_t *spin);

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_))
JEMALLOC_INLINE void
spin_init(spin_t *spin)
{
spin_init(spin_t *spin) {
	spin->iteration = 0;
}

JEMALLOC_INLINE void
spin_adaptive(spin_t *spin)
{
spin_adaptive(spin_t *spin) {
	volatile uint64_t i;

	for (i = 0; i < (KQU(1) << spin->iteration); i++)
	for (i = 0; i < (KQU(1) << spin->iteration); i++) {
		CPU_SPINWAIT;
	}

	if (spin->iteration < 63)
	if (spin->iteration < 63) {
		spin->iteration++;
	}
}

#endif

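For reference, a standalone sketch of the exponential-backoff busy-wait in spin_adaptive() above; DEMO_SPINWAIT() stands in for jemalloc's CPU_SPINWAIT, and UINT64_C() replaces the KQU() 64-bit-constant macro:

#include <stdint.h>

#if defined(__x86_64__) || defined(__i386__)
#define DEMO_SPINWAIT() __asm__ volatile("pause")
#else
#define DEMO_SPINWAIT() ((void)0)
#endif

typedef struct {
	unsigned iteration;
} demo_spin_t;

/* Each call spins twice as long as the last (capped), so waiters
 * back off exponentially under sustained contention. */
static void
demo_spin_adaptive(demo_spin_t *spin) {
	volatile uint64_t i;

	for (i = 0; i < (UINT64_C(1) << spin->iteration); i++) {
		DEMO_SPINWAIT();
	}

	if (spin->iteration < 63) {
		spin->iteration++;
	}
}
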
@@ -21,8 +21,7 @@ tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
JEMALLOC_INLINE void
tcache_flush(void)
{
tcache_flush(void) {
	tsd_t *tsd;

	cassert(config_tcache);
@@ -32,8 +31,7 @@ tcache_flush(void)
}

JEMALLOC_INLINE bool
tcache_enabled_get(void)
{
tcache_enabled_get(void) {
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

@@ -50,8 +48,7 @@ tcache_enabled_get(void)
}

JEMALLOC_INLINE void
tcache_enabled_set(bool enabled)
{
tcache_enabled_set(bool enabled) {
	tsd_t *tsd;
	tcache_enabled_t tcache_enabled;

@@ -62,21 +59,23 @@ tcache_enabled_set(bool enabled)
	tcache_enabled = (tcache_enabled_t)enabled;
	tsd_tcache_enabled_set(tsd, tcache_enabled);

	if (!enabled)
	if (!enabled) {
		tcache_cleanup(tsd);
	}
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcache_get(tsd_t *tsd, bool create)
{
tcache_get(tsd_t *tsd, bool create) {
	tcache_t *tcache;

	if (!config_tcache)
	if (!config_tcache) {
		return (NULL);
	}

	tcache = tsd_tcache_get(tsd);
	if (!create)
	if (!create) {
		return (tcache);
	}
	if (unlikely(tcache == NULL) && tsd_nominal(tsd)) {
		tcache = tcache_get_hard(tsd);
		tsd_tcache_set(tsd, tcache);
@@ -86,18 +85,18 @@ tcache_get(tsd_t *tsd, bool create)
}

JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache)
{
	if (TCACHE_GC_INCR == 0)
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker)))
	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
{
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
@@ -116,16 +115,16 @@ tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success)
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;

	if (unlikely((int)tbin->ncached < tbin->low_water))
	if (unlikely((int)tbin->ncached < tbin->low_water)) {
		tbin->low_water = tbin->ncached;
	}

	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
@@ -138,13 +137,15 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
		if (unlikely(arena == NULL)) {
			return (NULL);
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    tbin, binind, &tcache_hard_success);
		if (tcache_hard_success == false)
		if (tcache_hard_success == false) {
			return (NULL);
		}
	}

	assert(ret);
@@ -162,8 +163,9 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero))
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
@@ -173,18 +175,19 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
			memset(ret, 0, usize);
		}

	if (config_stats)
	if (config_stats) {
		tbin->tstats.nrequests++;
	if (config_prof)
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path)
{
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
@@ -199,12 +202,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL))
		if (unlikely(arena == NULL)) {
			return (NULL);
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
		if (ret == NULL)
		if (ret == NULL) {
			return (NULL);
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

@@ -220,16 +225,20 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
			if (unlikely(opt_junk_alloc)) {
				memset(ret, JEMALLOC_ALLOC_JUNK,
				    usize);
			} else if (unlikely(opt_zero))
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
		} else
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats)
		if (config_stats) {
			tbin->tstats.nrequests++;
		if (config_prof)
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
@@ -238,15 +247,15 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path)
{
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free))
	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
	}

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
@@ -263,8 +272,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
    bool slow_path)
{
    bool slow_path) {
	szind_t binind;
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;
@@ -274,8 +282,9 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,

	binind = size2index(size);

	if (slow_path && config_fill && unlikely(opt_junk_free))
	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, size);
	}

	tbin = &tcache->tbins[binind];
	tbin_info = &tcache_bin_info[binind];
@@ -291,8 +300,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
}

JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,

@@ -11,27 +11,23 @@ bool ticker_tick(ticker_t *ticker);

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{
ticker_init(ticker_t *ticker, int32_t nticks) {
	ticker->tick = nticks;
	ticker->nticks = nticks;
}

JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{
ticker_copy(ticker_t *ticker, const ticker_t *other) {
	*ticker = *other;
}

JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{
ticker_read(const ticker_t *ticker) {
	return (ticker->tick);
}

JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{
ticker_ticks(ticker_t *ticker, int32_t nticks) {
	if (unlikely(ticker->tick < nticks)) {
		ticker->tick = ticker->nticks;
		return (true);
@@ -41,8 +37,7 @@ ticker_ticks(ticker_t *ticker, int32_t nticks)
}

JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{
ticker_tick(ticker_t *ticker) {
	return (ticker_ticks(ticker, 1));
}
#endif

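For reference, a standalone sketch of the ticker countdown restyled above, which tcache_event() uses to fire periodic maintenance. The rearm-on-underflow fast path mirrors the diff; the decrement branch falls inside an elided hunk and is reconstructed here as an assumption:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t tick;
	int32_t nticks;
} demo_ticker_t;

static void
demo_ticker_init(demo_ticker_t *t, int32_t nticks) {
	t->tick = nticks;
	t->nticks = nticks;
}

/* Returns true roughly once per nticks accumulated ticks. */
static bool
demo_ticker_ticks(demo_ticker_t *t, int32_t nticks) {
	if (t->tick < nticks) {
		t->tick = t->nticks;	/* Rearm for the next period. */
		return true;
	}
	t->tick -= nticks;	/* Assumed from the elided context. */
	return false;
}

static bool
demo_ticker_tick(demo_ticker_t *t) {
	return demo_ticker_ticks(t, 1);
}
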
@@ -25,12 +25,12 @@ malloc_tsd_externs(, tsd_t)
malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup)

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch_impl(bool init)
{
tsd_fetch_impl(bool init) {
	tsd_t *tsd = tsd_get(init);

	if (!init && tsd_get_allocates() && tsd == NULL)
	if (!init && tsd_get_allocates() && tsd == NULL) {
		return (NULL);
	}
	assert(tsd != NULL);

	if (unlikely(tsd->state != tsd_state_nominal)) {
@@ -41,47 +41,42 @@ tsd_fetch_impl(bool init)
		} else if (tsd->state == tsd_state_purgatory) {
			tsd->state = tsd_state_reincarnated;
			tsd_set(tsd);
		} else
		} else {
			assert(tsd->state == tsd_state_reincarnated);
		}
	}

	return (tsd);
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsd_fetch(void)
{
tsd_fetch(void) {
	return (tsd_fetch_impl(true));
}

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsd_tsdn(tsd_t *tsd)
{
tsd_tsdn(tsd_t *tsd) {
	return ((tsdn_t *)tsd);
}

JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
tsd_nominal(tsd_t *tsd) {
	return (tsd->state == tsd_state_nominal);
}

#define O(n, t, c) \
JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) \
{ \
tsd_##n##p_get(tsd_t *tsd) { \
	return (&tsd->n); \
} \
 \
JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) \
{ \
tsd_##n##_get(tsd_t *tsd) { \
	return (*tsd_##n##p_get(tsd)); \
} \
 \
JEMALLOC_ALWAYS_INLINE void \
tsd_##n##_set(tsd_t *tsd, t n) \
{ \
tsd_##n##_set(tsd_t *tsd, t n) { \
	assert(tsd->state == tsd_state_nominal); \
	tsd->n = n; \
}
@@ -89,31 +84,28 @@ MALLOC_TSD
#undef O

JEMALLOC_ALWAYS_INLINE tsdn_t *
tsdn_fetch(void)
{
	if (!tsd_booted_get())
tsdn_fetch(void) {
	if (!tsd_booted_get()) {
		return (NULL);
	}

	return (tsd_tsdn(tsd_fetch_impl(false)));
}

JEMALLOC_ALWAYS_INLINE bool
tsdn_null(const tsdn_t *tsdn)
{
tsdn_null(const tsdn_t *tsdn) {
	return (tsdn == NULL);
}

JEMALLOC_ALWAYS_INLINE tsd_t *
tsdn_tsd(tsdn_t *tsdn)
{
tsdn_tsd(tsdn_t *tsdn) {
	assert(!tsdn_null(tsdn));

	return (&tsdn->tsd);
}

JEMALLOC_ALWAYS_INLINE rtree_ctx_t *
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback)
{
tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
	/*
	 * If tsd cannot be accessed, initialize the fallback rtree_ctx and
	 * return a pointer to it.

@@ -175,8 +175,7 @@ a_attr bool a_name##tsd_booted = false;
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
a_name##tsd_cleanup_wrapper(void) { \
	if (a_name##tsd_initialized) { \
		a_name##tsd_initialized = false; \
		a_cleanup(&a_name##tsd_tls); \
@@ -184,8 +183,7 @@ a_name##tsd_cleanup_wrapper(void) \
	return (a_name##tsd_initialized); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
a_name##tsd_boot0(void) { \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
@@ -194,96 +192,88 @@ a_name##tsd_boot0(void) \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_boot1(void) { \
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
a_name##tsd_boot(void) { \
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
a_name##tsd_booted_get(void) { \
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
a_name##tsd_get_allocates(void) { \
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_get(bool init) { \
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_set(a_type *val) { \
	assert(a_name##tsd_booted); \
	if (likely(&a_name##tsd_tls != val)) \
	if (likely(&a_name##tsd_tls != val)) { \
		a_name##tsd_tls = (*val); \
	if (a_cleanup != malloc_tsd_no_cleanup) \
	} \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		a_name##tsd_initialized = true; \
	} \
}
#elif (defined(JEMALLOC_TLS))
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
a_name##tsd_boot0(void) { \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \
		    0) \
		    0) { \
			return (true); \
		} \
	} \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_boot1(void) { \
	/* Do nothing. */ \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
a_name##tsd_boot(void) { \
	return (a_name##tsd_boot0()); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
a_name##tsd_booted_get(void) { \
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
a_name##tsd_get_allocates(void) { \
	return (false); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_get(bool init) { \
	assert(a_name##tsd_booted); \
	return (&a_name##tsd_tls); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_set(a_type *val) { \
	assert(a_name##tsd_booted); \
	if (likely(&a_name##tsd_tls != val)) \
	if (likely(&a_name##tsd_tls != val)) { \
		a_name##tsd_tls = (*val); \
	} \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		if (pthread_setspecific(a_name##tsd_tsd, \
		    (void *)(&a_name##tsd_tls))) { \
			malloc_write("<jemalloc>: Error" \
			    " setting TSD for "#a_name"\n"); \
			if (opt_abort) \
			if (opt_abort) { \
				abort(); \
			} \
		} \
	} \
}
@@ -292,15 +282,15 @@ a_name##tsd_set(a_type *val) \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr bool \
a_name##tsd_cleanup_wrapper(void) \
{ \
a_name##tsd_cleanup_wrapper(void) { \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
	SetLastError(error); \
 \
	if (wrapper == NULL) \
	if (wrapper == NULL) { \
		return (false); \
	} \
	if (a_cleanup != malloc_tsd_no_cleanup && \
	    wrapper->initialized) { \
		wrapper->initialized = false; \
@@ -314,8 +304,7 @@ a_name##tsd_cleanup_wrapper(void) \
	return (false); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) { \
	if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
		    " TSD for "#a_name"\n"); \
@@ -323,8 +312,7 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
a_name##tsd_wrapper_get(bool init) { \
	DWORD error = GetLastError(); \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    TlsGetValue(a_name##tsd_tsd); \
@@ -346,11 +334,11 @@ a_name##tsd_wrapper_get(bool init) \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
a_name##tsd_boot0(void) { \
	a_name##tsd_tsd = TlsAlloc(); \
	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \
	if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) { \
		return (true); \
	} \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		malloc_tsd_cleanup_register( \
		    &a_name##tsd_cleanup_wrapper); \
@@ -360,8 +348,7 @@ a_name##tsd_boot0(void) \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_boot1(void) { \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
@@ -375,54 +362,52 @@ a_name##tsd_boot1(void) \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
	if (a_name##tsd_boot0()) \
a_name##tsd_boot(void) { \
	if (a_name##tsd_boot0()) { \
		return (true); \
	} \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
a_name##tsd_booted_get(void) { \
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
a_name##tsd_get_allocates(void) { \
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_get(bool init) { \
	a_name##tsd_wrapper_t *wrapper; \
 \
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) { \
		return (NULL); \
	} \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_set(a_type *val) { \
	a_name##tsd_wrapper_t *wrapper; \
 \
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	if (likely(&wrapper->val != val)) \
	if (likely(&wrapper->val != val)) { \
		wrapper->val = *(val); \
		if (a_cleanup != malloc_tsd_no_cleanup) \
	} \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		wrapper->initialized = true; \
	} \
}
#else
#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \
    a_cleanup) \
/* Initialization/cleanup. */ \
a_attr void \
a_name##tsd_cleanup_wrapper(void *arg) \
{ \
a_name##tsd_cleanup_wrapper(void *arg) { \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \
 \
	if (a_cleanup != malloc_tsd_no_cleanup && \
@@ -435,8 +420,9 @@ a_name##tsd_cleanup_wrapper(void *arg) \
		    (void *)wrapper)) { \
			malloc_write("<jemalloc>: Error" \
			    " setting TSD for "#a_name"\n"); \
			if (opt_abort) \
			if (opt_abort) { \
				abort(); \
			} \
		} \
		return; \
	} \
@@ -444,8 +430,7 @@ a_name##tsd_cleanup_wrapper(void *arg) \
	malloc_tsd_dalloc(wrapper); \
} \
a_attr void \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
{ \
a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) { \
	if (pthread_setspecific(a_name##tsd_tsd, \
	    (void *)wrapper)) { \
		malloc_write("<jemalloc>: Error setting" \
@@ -454,8 +439,7 @@ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \
	} \
} \
a_attr a_name##tsd_wrapper_t * \
a_name##tsd_wrapper_get(bool init) \
{ \
a_name##tsd_wrapper_get(bool init) { \
	a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \
	    pthread_getspecific(a_name##tsd_tsd); \
 \
@@ -464,8 +448,9 @@ a_name##tsd_wrapper_get(bool init) \
		wrapper = (a_name##tsd_wrapper_t *) \
		    tsd_init_check_recursion(&a_name##tsd_init_head, \
		    &block); \
		if (wrapper) \
			return (wrapper); \
		if (wrapper) { \
			return (wrapper); \
		} \
		wrapper = (a_name##tsd_wrapper_t *) \
		    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
		block.data = (void *)wrapper; \
@@ -483,18 +468,17 @@ a_name##tsd_wrapper_get(bool init) \
	return (wrapper); \
} \
a_attr bool \
a_name##tsd_boot0(void) \
{ \
a_name##tsd_boot0(void) { \
	if (pthread_key_create(&a_name##tsd_tsd, \
	    a_name##tsd_cleanup_wrapper) != 0) \
	    a_name##tsd_cleanup_wrapper) != 0) { \
		return (true); \
	} \
	a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \
	a_name##tsd_booted = true; \
	return (false); \
} \
a_attr void \
a_name##tsd_boot1(void) \
{ \
a_name##tsd_boot1(void) { \
	a_name##tsd_wrapper_t *wrapper; \
	wrapper = (a_name##tsd_wrapper_t *) \
	    malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
@@ -508,46 +492,45 @@ a_name##tsd_boot1(void) \
	a_name##tsd_wrapper_set(wrapper); \
} \
a_attr bool \
a_name##tsd_boot(void) \
{ \
	if (a_name##tsd_boot0()) \
a_name##tsd_boot(void) { \
	if (a_name##tsd_boot0()) { \
		return (true); \
	} \
	a_name##tsd_boot1(); \
	return (false); \
} \
a_attr bool \
a_name##tsd_booted_get(void) \
{ \
a_name##tsd_booted_get(void) { \
	return (a_name##tsd_booted); \
} \
a_attr bool \
a_name##tsd_get_allocates(void) \
{ \
a_name##tsd_get_allocates(void) { \
	return (true); \
} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(bool init) \
{ \
a_name##tsd_get(bool init) { \
	a_name##tsd_wrapper_t *wrapper; \
 \
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(init); \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \
	if (a_name##tsd_get_allocates() && !init && wrapper == NULL) { \
		return (NULL); \
	} \
	return (&wrapper->val); \
} \
a_attr void \
a_name##tsd_set(a_type *val) \
{ \
a_name##tsd_set(a_type *val) { \
	a_name##tsd_wrapper_t *wrapper; \
 \
	assert(a_name##tsd_booted); \
	wrapper = a_name##tsd_wrapper_get(true); \
	if (likely(&wrapper->val != val)) \
	if (likely(&wrapper->val != val)) { \
		wrapper->val = *(val); \
		if (a_cleanup != malloc_tsd_no_cleanup) \
	} \
	if (a_cleanup != malloc_tsd_no_cleanup) { \
		wrapper->initialized = true; \
	} \
}
#endif

@@ -25,26 +25,22 @@ int get_errno(void);
#endif

JEMALLOC_ALWAYS_INLINE unsigned
ffs_llu(unsigned long long bitmap)
{
ffs_llu(unsigned long long bitmap) {
	return (JEMALLOC_INTERNAL_FFSLL(bitmap));
}

JEMALLOC_ALWAYS_INLINE unsigned
ffs_lu(unsigned long bitmap)
{
ffs_lu(unsigned long bitmap) {
	return (JEMALLOC_INTERNAL_FFSL(bitmap));
}

JEMALLOC_ALWAYS_INLINE unsigned
ffs_u(unsigned bitmap)
{
ffs_u(unsigned bitmap) {
	return (JEMALLOC_INTERNAL_FFS(bitmap));
}

JEMALLOC_ALWAYS_INLINE unsigned
ffs_zu(size_t bitmap)
{
ffs_zu(size_t bitmap) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
	return (ffs_u(bitmap));
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
@@ -57,8 +53,7 @@ ffs_zu(size_t bitmap)
}

JEMALLOC_ALWAYS_INLINE unsigned
ffs_u64(uint64_t bitmap)
{
ffs_u64(uint64_t bitmap) {
#if LG_SIZEOF_LONG == 3
	return (ffs_lu(bitmap));
#elif LG_SIZEOF_LONG_LONG == 3
@@ -69,8 +64,7 @@ ffs_u64(uint64_t bitmap)
}

JEMALLOC_ALWAYS_INLINE unsigned
ffs_u32(uint32_t bitmap)
{
ffs_u32(uint32_t bitmap) {
#if LG_SIZEOF_INT == 2
	return (ffs_u(bitmap));
#else
@@ -80,8 +74,7 @@ ffs_u32(uint32_t bitmap)
}

JEMALLOC_INLINE uint64_t
pow2_ceil_u64(uint64_t x)
{
pow2_ceil_u64(uint64_t x) {
	x--;
	x |= x >> 1;
	x |= x >> 2;
@@ -94,8 +87,7 @@ pow2_ceil_u64(uint64_t x)
}

JEMALLOC_INLINE uint32_t
pow2_ceil_u32(uint32_t x)
{
pow2_ceil_u32(uint32_t x) {
	x--;
	x |= x >> 1;
	x |= x >> 2;
@@ -108,8 +100,7 @@ pow2_ceil_u32(uint32_t x)

/* Compute the smallest power of 2 that is >= x. */
JEMALLOC_INLINE size_t
pow2_ceil_zu(size_t x)
{
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
	return (pow2_ceil_u64(x));
#else
@@ -119,8 +110,7 @@ pow2_ceil_zu(size_t x)

#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
lg_floor(size_t x) {
	size_t ret;

	assert(x != 0);
@@ -134,8 +124,7 @@ lg_floor(size_t x)
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
lg_floor(size_t x) {
	unsigned long ret;

	assert(x != 0);
@@ -152,8 +141,7 @@ lg_floor(size_t x)
}
#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
lg_floor(size_t x) {
	assert(x != 0);

#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
@@ -166,8 +154,7 @@ lg_floor(size_t x)
}
#else
JEMALLOC_INLINE unsigned
lg_floor(size_t x)
{
lg_floor(size_t x) {
	assert(x != 0);

	x |= (x >> 1);
@@ -178,8 +165,9 @@ lg_floor(size_t x)
#if (LG_SIZEOF_PTR == 3)
	x |= (x >> 32);
#endif
	if (x == SIZE_T_MAX)
	if (x == SIZE_T_MAX) {
		return ((8 << LG_SIZEOF_PTR) - 1);
	}
	x++;
	return (ffs_zu(x) - 2);
}
@@ -187,8 +175,7 @@ lg_floor(size_t x)

/* Set error code. */
JEMALLOC_INLINE void
set_errno(int errnum)
{
set_errno(int errnum) {
#ifdef _WIN32
	SetLastError(errnum);
#else
@@ -198,8 +185,7 @@ set_errno(int errnum)

/* Get last error code. */
JEMALLOC_INLINE int
get_errno(void)
{
get_errno(void) {
#ifdef _WIN32
	return (GetLastError());
#else

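For reference, the pow2_ceil_u32() body above is truncated by the hunk; the standard bit-smearing idiom it uses looks like the following sketch (propagate the highest set bit into all lower positions, then add one). The exact in-tree tail is assumed rather than shown by this diff:

#include <stdint.h>

static uint32_t
demo_pow2_ceil_u32(uint32_t x) {
	x--;		/* So exact powers of two map to themselves. */
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;	/* Every bit below the MSB is now set. */
	x++;
	return x;
}
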
@@ -87,8 +87,9 @@

/* Use to assert a particular configuration, e.g., cassert(config_debug). */
#define cassert(c) do { \
	if (unlikely(!(c))) \
	if (unlikely(!(c))) { \
		not_reached(); \
	} \
} while (0)

#endif /* JEMALLOC_INTERNAL_UTIL_TYPES_H */

@@ -13,8 +13,7 @@ void witness_unlock(tsdn_t *tsdn, witness_t *witness);
|
||||
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
|
||||
/* Helper, not intended for direct use. */
|
||||
JEMALLOC_INLINE bool
|
||||
witness_owner(tsd_t *tsd, const witness_t *witness)
|
||||
{
|
||||
witness_owner(tsd_t *tsd, const witness_t *witness) {
|
||||
witness_list_t *witnesses;
|
||||
witness_t *w;
|
||||
|
||||
@@ -22,90 +21,101 @@ witness_owner(tsd_t *tsd, const witness_t *witness)
|
||||
|
||||
witnesses = tsd_witnessesp_get(tsd);
|
||||
ql_foreach(w, witnesses, link) {
|
||||
if (w == witness)
|
||||
if (w == witness) {
|
||||
return (true);
|
||||
}
|
||||
}
|
||||
|
||||
return (false);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
|
||||
{
|
||||
witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) {
|
||||
tsd_t *tsd;
|
||||
|
||||
if (!config_debug)
|
||||
if (!config_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (tsdn_null(tsdn))
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
tsd = tsdn_tsd(tsdn);
|
||||
if (witness->rank == WITNESS_RANK_OMIT)
|
||||
if (witness->rank == WITNESS_RANK_OMIT) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (witness_owner(tsd, witness))
|
||||
if (witness_owner(tsd, witness)) {
|
||||
return;
|
||||
}
|
||||
witness_owner_error(witness);
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
|
||||
{
|
||||
witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) {
|
||||
tsd_t *tsd;
|
||||
witness_list_t *witnesses;
|
||||
witness_t *w;
|
||||
|
||||
if (!config_debug)
|
||||
if (!config_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (tsdn_null(tsdn))
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
tsd = tsdn_tsd(tsdn);
|
||||
if (witness->rank == WITNESS_RANK_OMIT)
|
||||
if (witness->rank == WITNESS_RANK_OMIT) {
|
||||
return;
|
||||
}
|
||||
|
||||
witnesses = tsd_witnessesp_get(tsd);
|
||||
ql_foreach(w, witnesses, link) {
|
||||
if (w == witness)
|
||||
if (w == witness) {
|
||||
witness_not_owner_error(witness);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
witness_assert_lockless(tsdn_t *tsdn)
|
||||
{
|
||||
witness_assert_lockless(tsdn_t *tsdn) {
|
||||
tsd_t *tsd;
|
||||
witness_list_t *witnesses;
|
||||
witness_t *w;
|
||||
|
||||
if (!config_debug)
|
||||
if (!config_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (tsdn_null(tsdn))
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
tsd = tsdn_tsd(tsdn);
|
||||
|
||||
witnesses = tsd_witnessesp_get(tsd);
|
||||
w = ql_last(witnesses, link);
|
||||
if (w != NULL)
|
||||
if (w != NULL) {
|
||||
witness_lockless_error(witnesses);
|
||||
}
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
witness_lock(tsdn_t *tsdn, witness_t *witness)
|
||||
{
|
||||
witness_lock(tsdn_t *tsdn, witness_t *witness) {
|
||||
tsd_t *tsd;
|
||||
witness_list_t *witnesses;
|
||||
witness_t *w;
|
||||
|
||||
if (!config_debug)
|
||||
if (!config_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (tsdn_null(tsdn))
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
tsd = tsdn_tsd(tsdn);
|
||||
if (witness->rank == WITNESS_RANK_OMIT)
|
||||
if (witness->rank == WITNESS_RANK_OMIT) {
|
||||
return;
|
||||
}
|
||||
|
||||
witness_assert_not_owner(tsdn, witness);
|
||||
|
||||
@@ -133,19 +143,21 @@ witness_lock(tsdn_t *tsdn, witness_t *witness)
|
||||
}
|
||||
|
||||
JEMALLOC_INLINE void
|
||||
witness_unlock(tsdn_t *tsdn, witness_t *witness)
|
||||
{
|
||||
witness_unlock(tsdn_t *tsdn, witness_t *witness) {
|
||||
tsd_t *tsd;
|
||||
witness_list_t *witnesses;
|
||||
|
||||
if (!config_debug)
|
||||
if (!config_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (tsdn_null(tsdn))
|
||||
if (tsdn_null(tsdn)) {
|
||||
return;
|
||||
}
|
||||
tsd = tsdn_tsd(tsdn);
|
||||
if (witness->rank == WITNESS_RANK_OMIT)
|
||||
if (witness->rank == WITNESS_RANK_OMIT) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check whether owner before removal, rather than relying on
|
||||
@@ -155,8 +167,9 @@ witness_unlock(tsdn_t *tsdn, witness_t *witness)
|
||||
if (witness_owner(tsd, witness)) {
|
||||
witnesses = tsd_witnessesp_get(tsd);
|
||||
ql_remove(witnesses, witness, link);
|
||||
} else
|
||||
} else {
|
||||
witness_assert_owner(tsdn, witness);
|
||||
}
|
||||
}
|
||||
#endif
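
Taken together, these functions are jemalloc's witness machinery: every lock carries a rank, each thread keeps a list of the witnesses it currently owns in thread-specific data, and the assertions above catch owning a lock twice, releasing one that is not held, or holding anything at a point that must be lockless. A toy model of the same idea, with a fixed-size per-thread rank stack in place of jemalloc's ql_ list (all toy_ names are illustrative; the real code also has escape hatches such as WITNESS_RANK_OMIT, visible above):

#include <assert.h>
#include <stdio.h>

#define HELD_MAX 16

static _Thread_local unsigned held_ranks[HELD_MAX];
static _Thread_local int held_depth;

static void
toy_witness_lock(unsigned rank) {
	assert(held_depth < HELD_MAX);
	/* Enforce strictly increasing rank order within this thread. */
	assert(held_depth == 0 || rank > held_ranks[held_depth - 1]);
	held_ranks[held_depth++] = rank;
}

static void
toy_witness_unlock(unsigned rank) {
	/* LIFO unlock for simplicity; the real list allows any order. */
	assert(held_depth > 0 && held_ranks[held_depth - 1] == rank);
	held_depth--;
}

static void
toy_witness_assert_lockless(void) {
	assert(held_depth == 0);
}

int
main(void) {
	toy_witness_lock(1);	/* lower-ranked lock first */
	toy_witness_lock(2);	/* higher rank: allowed */
	toy_witness_unlock(2);
	toy_witness_unlock(1);
	toy_witness_assert_lockless();
	printf("lock order respected\n");
	return (0);
}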
@@ -6,17 +6,16 @@
#ifdef _MSC_VER
# include <intrin.h>
# pragma intrinsic(_BitScanForward)
static __forceinline int ffsl(long x)
{
static __forceinline int ffsl(long x) {
unsigned long i;

if (_BitScanForward(&i, x))
if (_BitScanForward(&i, x)) {
return (i + 1);
}
return (0);
}

static __forceinline int ffs(int x)
{
static __forceinline int ffs(int x) {
return (ffsl(x));
}

@@ -24,12 +23,12 @@ static __forceinline int ffs(int x)
# pragma intrinsic(_BitScanForward64)
# endif

static __forceinline int ffsll(unsigned __int64 x)
{
static __forceinline int ffsll(unsigned __int64 x) {
unsigned long i;
#ifdef _M_X64
if (_BitScanForward64(&i, x))
if (_BitScanForward64(&i, x)) {
return (i + 1);
}
return (0);
#else
// Fallback for 32-bit build where 64-bit version not available
@@ -41,10 +40,11 @@ static __forceinline int ffsll(unsigned __int64 x)

s.ll = x;

if (_BitScanForward(&i, s.l[0]))
if (_BitScanForward(&i, s.l[0])) {
return (i + 1);
else if(_BitScanForward(&i, s.l[1]))
} else if(_BitScanForward(&i, s.l[1])) {
return (i + 33);
}
return (0);
#endif
}
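
The 32-bit fallback above leans on _BitScanForward reporting a zero-based bit index: a hit in the low word yields i + 1 (one-based), and a hit in the high word yields i + 33 (one-based plus the 32 low bits already scanned). The same split can be sketched portably, with a hypothetical ffs64_split() in place of the MSVC intrinsics:

#include <stdint.h>
#include <stdio.h>

/* One-based first-set-bit index of a 32-bit word; 0 if none set. */
static int
ffs32(uint32_t x) {
	int i;

	if (x == 0) {
		return (0);
	}
	for (i = 1; (x & 1) == 0; i++) {
		x >>= 1;
	}
	return (i);
}

static int
ffs64_split(uint64_t x) {
	int r;

	if ((r = ffs32((uint32_t)x)) != 0) {
		return (r);
	}
	/* Nothing in the low word: offset high-word hits by 32. */
	if ((r = ffs32((uint32_t)(x >> 32))) != 0) {
		return (r + 32);
	}
	return (0);
}

int
main(void) {
	/* Expect 1 33 0. */
	printf("%d %d %d\n", ffs64_split(1), ffs64_split(1ULL << 32),
	    ffs64_split(0));
	return (0);
}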