#ifndef JEMALLOC_INTERNAL_ATOMIC_INLINES_H
#define JEMALLOC_INTERNAL_ATOMIC_INLINES_H

/*
 * All arithmetic functions return the arithmetic result of the atomic
 * operation. Some atomic operation APIs return the value prior to mutation, in
 * which case the following functions must redundantly compute the result so
 * that it can be returned. These functions are normally inlined, so the extra
 * operations can be optimized away if the return values aren't used by the
 * callers.
 *
 *   <t> atomic_read_<t>(<t> *p) { return (*p); }
 *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); }
 *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); }
 *   bool atomic_cas_<t>(<t> *p, <t> c, <t> s)
 *   {
 *     if (*p != c)
 *       return (true);
 *     *p = s;
 *     return (false);
 *   }
 *   void atomic_write_<t>(<t> *p, <t> x) { *p = x; }
 */
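
/*
 * Illustrative sketch (not part of this header) of how these primitives
 * compose; the counter and wrapper names below are hypothetical. Note that
 * atomic_cas_<t>() returns false on success, so a "try update" wrapper
 * negates it:
 *
 *   static size_t ncached;
 *
 *   static void
 *   ncached_incr(size_t n)
 *   {
 *     atomic_add_zu(&ncached, n);
 *   }
 *
 *   static bool
 *   ncached_try_update(size_t oldval, size_t newval)
 *   {
 *     return (!atomic_cas_zu(&ncached, oldval, newval));
 *   }
 */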

#ifndef JEMALLOC_ENABLE_INLINE
# if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
uint64_t atomic_add_u64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_u64(uint64_t *p, uint64_t x);
bool atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s);
void atomic_write_u64(uint64_t *p, uint64_t x);
# endif
uint32_t atomic_add_u32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_u32(uint32_t *p, uint32_t x);
bool atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s);
void atomic_write_u32(uint32_t *p, uint32_t x);
void *atomic_add_p(void **p, void *x);
void *atomic_sub_p(void **p, void *x);
bool atomic_cas_p(void **p, void *c, void *s);
void atomic_write_p(void **p, const void *x);
size_t atomic_add_zu(size_t *p, size_t x);
size_t atomic_sub_zu(size_t *p, size_t x);
bool atomic_cas_zu(size_t *p, size_t c, size_t s);
void atomic_write_zu(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
bool atomic_cas_u(unsigned *p, unsigned c, unsigned s);
void atomic_write_u(unsigned *p, unsigned x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
/* 64-bit operations. */
#if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
# if (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
    uint64_t t = x;

    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
    uint64_t t;

    x = (uint64_t)(-(int64_t)x);
    t = x;
    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
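    /*
     * lock cmpxchgq compares c (in %rax) with *p and stores s into *p iff
     * they match; sete then records ZF, i.e. whether the exchange happened.
     */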
    uint8_t success;

    asm volatile (
        "lock; cmpxchgq %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory" /* Clobbers. */
        );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
    asm volatile (
        "xchgq %1, %0;" /* Lock is implied by xchgq. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
# elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
    volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
    atomic_store(a, x);
}
# elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
    /*
     * atomic_fetchadd_64() doesn't exist, but we only ever use this
     * function on LP64 systems, so atomic_fetchadd_long() will do.
     */
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s));
}

JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
    assert(sizeof(uint64_t) == sizeof(unsigned long));

    atomic_store_rel_long(p, x);
}
# elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
    return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p));
}

JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
    uint64_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_u64(p);
    } while (atomic_cas_u64(p, o, x));
}
# elif (defined(_MSC_VER))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
    return (InterlockedExchangeAdd64(p, x) + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
    return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
    uint64_t o;

    o = InterlockedCompareExchange64(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
    InterlockedExchange64(p, x);
}
# elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \
    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
JEMALLOC_INLINE uint64_t
atomic_add_u64(uint64_t *p, uint64_t x)
{
    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_u64(uint64_t *p, uint64_t x)
{
    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_u64(uint64_t *p, uint64_t c, uint64_t s)
{
    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_u64(uint64_t *p, uint64_t x)
{
    __sync_lock_test_and_set(p, x);
}
# else
# error "Missing implementation for 64-bit atomic operations"
# endif
#endif

/******************************************************************************/
/* 32-bit operations. */
#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
    uint32_t t = x;

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
    uint32_t t;

    x = (uint32_t)(-(int32_t)x);
    t = x;
    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    return (t + x);
}

JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint8_t success;

    asm volatile (
        "lock; cmpxchgl %4, %0;"
        "sete %1;"
        : "=m" (*p), "=a" (success) /* Outputs. */
        : "m" (*p), "a" (c), "r" (s) /* Inputs. */
        : "memory"
        );

    return (!(bool)success);
}

JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
    asm volatile (
        "xchgl %1, %0;" /* Lock is implied by xchgl. */
        : "=m" (*p), "+r" (x) /* Outputs. */
        : "m" (*p) /* Inputs. */
        : "memory" /* Clobbers. */
        );
}
#elif (defined(JEMALLOC_C11ATOMICS))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_add(a, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (atomic_fetch_sub(a, x) - x);
}

JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    return (!atomic_compare_exchange_strong(a, &c, s));
}

JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
    volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p;
    atomic_store(a, x);
}
#elif (defined(JEMALLOC_ATOMIC9))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
    return (atomic_fetchadd_32(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
    return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
    return (!atomic_cmpset_32(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
    atomic_store_rel_32(p, x);
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}

JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
    return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p));
}

JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
    uint32_t o;

    /* The documented OSAtomic*() API does not expose an atomic exchange. */
    do {
        o = atomic_read_u32(p);
    } while (atomic_cas_u32(p, o, x));
}
#elif (defined(_MSC_VER))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
    return (InterlockedExchangeAdd(p, x) + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
    return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
}

JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
    uint32_t o;

    o = InterlockedCompareExchange(p, s, c);
    return (o != c);
}

JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
    InterlockedExchange(p, x);
}
#elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \
    defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_u32(uint32_t *p, uint32_t x)
{
    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_u32(uint32_t *p, uint32_t x)
{
    return (__sync_sub_and_fetch(p, x));
}

JEMALLOC_INLINE bool
atomic_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
    return (!__sync_bool_compare_and_swap(p, c, s));
}

JEMALLOC_INLINE void
atomic_write_u32(uint32_t *p, uint32_t x)
{
    __sync_lock_test_and_set(p, x);
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

/******************************************************************************/
/* Pointer operations. */
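/*
 * The pointer, size_t, and unsigned variants below delegate to the 32- or
 * 64-bit primitives of matching width, as selected by LG_SIZEOF_PTR and
 * LG_SIZEOF_INT.
 */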
JEMALLOC_INLINE void *
atomic_add_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_u32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE void *
atomic_sub_p(void **p, void *x)
{
#if (LG_SIZEOF_PTR == 3)
    return ((void *)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((void *)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_p(void **p, void *c, void *s)
{
#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
Move centralized chunk management into arenas.
Migrate all centralized data structures related to huge allocations and
recyclable chunks into arena_t, so that each arena can manage huge
allocations and recyclable virtual memory completely independently of
other arenas.
Add chunk node caching to arenas, in order to avoid contention on the
base allocator.
Use chunks_rtree to look up huge allocations rather than a red-black
tree. Maintain a per arena unsorted list of huge allocations (which
will be needed to enumerate huge allocations during arena reset).
Remove the --enable-ivsalloc option, make ivsalloc() always available,
and use it for size queries if --enable-debug is enabled. The only
practical implications to this removal are that 1) ivsalloc() is now
always available during live debugging (and the underlying radix tree is
available during core-based debugging), and 2) size query validation can
no longer be enabled independent of --enable-debug.
Remove the stats.chunks.{current,total,high} mallctls, and replace their
underlying statistics with simpler atomically updated counters used
exclusively for gdump triggering. These statistics are no longer very
useful because each arena manages chunks independently, and per arena
statistics provide similar information.
Simplify chunk synchronization code, now that base chunk allocation
cannot cause recursive lock acquisition.
2015-02-12 04:24:27 +08:00
|
|
|
atomic_write_p(void **p, const void *x)
{
#if (LG_SIZEOF_PTR == 3)
    atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_u32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* size_t operations. */
JEMALLOC_INLINE size_t
atomic_add_zu(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_u32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE size_t
atomic_sub_zu(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_u64((uint64_t *)p, (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_u32((uint32_t *)p, (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_zu(size_t *p, size_t c, size_t s)
{
#if (LG_SIZEOF_PTR == 3)
    return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_PTR == 2)
    return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_zu(size_t *p, size_t x)
{
#if (LG_SIZEOF_PTR == 3)
    atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_PTR == 2)
    atomic_write_u32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
/* unsigned operations. */
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_u64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_u32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_u64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_u32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

JEMALLOC_INLINE bool
atomic_cas_u(unsigned *p, unsigned c, unsigned s)
{
#if (LG_SIZEOF_INT == 3)
    return (atomic_cas_u64((uint64_t *)p, (uint64_t)c, (uint64_t)s));
#elif (LG_SIZEOF_INT == 2)
    return (atomic_cas_u32((uint32_t *)p, (uint32_t)c, (uint32_t)s));
#endif
}

JEMALLOC_INLINE void
atomic_write_u(unsigned *p, unsigned x)
{
#if (LG_SIZEOF_INT == 3)
    atomic_write_u64((uint64_t *)p, (uint64_t)x);
#elif (LG_SIZEOF_INT == 2)
    atomic_write_u32((uint32_t *)p, (uint32_t)x);
#endif
}

/******************************************************************************/
#endif
#endif /* JEMALLOC_INTERNAL_ATOMIC_INLINES_H */