/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_p(p) atomic_add_p(p, NULL)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)
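
/*
 * Reads are expressed as adds of the identity element.  For illustration only
 * (caller code; "nactive" is a hypothetical variable, not part of jemalloc):
 *
 *   size_t n = atomic_read_z(&nactive);  // Same as atomic_add_z(&nactive, 0).
 */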

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

/*
 * All arithmetic functions return the arithmetic result of the atomic
 * operation. Some atomic operation APIs return the value prior to mutation, in
 * which case the following functions must redundantly compute the result so
 * that it can be returned. These functions are normally inlined, so the extra
 * operations can be optimized away if the return values aren't used by the
 * callers.
 *
 *   <t> atomic_read_<t>(<t> *p) { return (*p); }
 *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
 *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
 *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
 *   {
 *     if (*p != c)
 *       return (true);
 *     *p = s;
 *     return (false);
 *   }
 *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
 */
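
/*
 * Usage sketch (illustrative only; the names below are hypothetical and not
 * part of jemalloc).  Note the inverted return convention: atomic_cas_<t>()
 * returns false on success and true on failure, so a typical read/CAS retry
 * loop looks like:
 *
 *   static size_t max_seen;
 *
 *   static void
 *   update_max_seen(size_t x)
 *   {
 *     size_t cur;
 *
 *     do {
 *       cur = atomic_read_z(&max_seen);
 *       if (x <= cur)
 *         return;
 *     } while (atomic_cas_z(&max_seen, cur, x)); // Retry while the CAS fails.
 *   }
 */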

#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_uint32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
bool atomic_cas_z(size_t *p, size_t c, size_t s);
void atomic_write_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t = x;
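
    /*
     * "lock; xaddq" atomically replaces *p with *p + t and leaves the previous
     * value of *p in t, so t + x is the value of *p after the addition.
     */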
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t;

    x = (uint64_t)(-(int64_t)x);
    t = x;
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint8_t success;
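
    /*
     * "lock; cmpxchgq" compares *p with c (implicitly held in %rax); if they
     * are equal it stores s into *p and sets ZF, otherwise it clears ZF.
     * "sete" records ZF in success, so success is nonzero exactly when the
     * swap happened.
     */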
    asm volatile (
        "lock; cmpxchgq %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory" /* Clobbers. */
        );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    asm volatile (
        "xchgq %1, %0;" /* Lock is implied by xchgq. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    /*
     * atomic_fetchadd_64() doesn't exist, but we only ever use this
     * function on LP64 systems, so atomic_fetchadd_long() will do.
     */
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    assert(sizeof(uint64_t) == sizeof(unsigned long));

    atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{
    uint64_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_uint64(p);
    } while (atomic_cas_uint64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint64_t o;

    o = InterlockedCompareExchange64(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
       defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s)
{

    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint64(uint64_t *p, uint64_t x)
{

    __sync_lock_test_and_set(p, x);
}
# else
#  error "Missing implementation for 64-bit atomic operations"
# endif
#endif

/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t = x;

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t;

    x = (uint32_t)(-(int32_t)x);
    t = x;
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint8_t success;

    asm volatile (
        "lock; cmpxchgl %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory"
        );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    asm volatile (
        "xchgl %1, %0;" /* Lock is implied by xchgl. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (atomic_fetchadd_32(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!atomic_cmpset_32(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{
    uint32_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_uint32(p);
    } while (atomic_cas_uint32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint32_t o;

    o = InterlockedCompareExchange(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
      defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s)
{

    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_uint32(uint32_t *p, uint32_t x)
{

    __sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

/******************************************************************************/
/* Pointer operations. */
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{

#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}
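
/*
 * Illustration only (create(), destroy(), and the cached pointer below are
 * hypothetical, not part of jemalloc): the false-on-success convention makes
 * atomic_cas_p() convenient for one-time initialization:
 *
 *   static void *cached;
 *
 *   static void *
 *   get_cached(void)
 *   {
 *     void *p = atomic_read_p(&cached);
 *     if (p == NULL) {
 *       void *q = create();
 *       if (atomic_cas_p(&cached, NULL, q)) {
 *         // Lost the race; discard q and use the winner's value.
 *         destroy(q);
 *         p = atomic_read_p(&cached);
 *       } else
 *         p = q;
 *     }
 *     return (p);
 *   }
 */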

JEMALLOC_INLINE void
atomic_write_p(void **p, const void *x)
{

#if (LG_SIZEOF_PTR == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_z(size_t *p, size_t c, size_t s)
{

#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{

#if (LG_SIZEOF_INT == 3)
    return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
    return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    atomic_write_uint64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
    atomic_write_uint32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/