/*
 * The atomic operations below prefer the GCC __sync_*() functions.  These may
 * be available either as compiler inlines or as libgcc functions; in the
 * former case, GCC predefines a __GCC_HAVE_SYNC_COMPARE_AND_SWAP_<n> macro
 * (<n> being the operand size in bytes).  We still want to use the libgcc
 * functions in the latter case, when no implementation of our own is
 * available, but only when JE_FORCE_SYNC_COMPARE_AND_SWAP_<n> is defined at
 * configure time.
 */
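/*
 * Example (hypothetical caller), assuming a 64-bit event counter:
 *
 *    static uint64_t nevents;
 *
 *    atomic_add_uint64(&nevents, 1);            (returns the new value)
 *    uint64_t n = atomic_read_uint64(&nevents);
 */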
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
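
/*
 * An atomic read is implemented as an atomic add of zero: the add returns the
 * updated (here, unchanged) value, so no dedicated read primitive is needed.
 */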
#define atomic_read_uint64(p) atomic_add_uint64(p, 0)
#define atomic_read_uint32(p) atomic_add_uint32(p, 0)
#define atomic_read_z(p) atomic_add_z(p, 0)
#define atomic_read_u(p) atomic_add_u(p, 0)

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
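
/*
 * Prototypes for the non-inline case: the definitions below are then compiled
 * exactly once, by the translation unit that defines JEMALLOC_ATOMIC_C_
 * before including this header.
 */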
#ifndef JEMALLOC_ENABLE_INLINE
uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
uint32_t atomic_add_uint32(uint32_t *p, uint32_t x);
uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x);
size_t atomic_add_z(size_t *p, size_t x);
size_t atomic_sub_z(size_t *p, size_t x);
unsigned atomic_add_u(unsigned *p, unsigned x);
unsigned atomic_sub_u(unsigned *p, unsigned x);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_))
/******************************************************************************/
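/*
 * Backend preference, per operand width: GCC __sync builtins when the
 * compiler advertises them as inlines, then Darwin's OSAtomic, then
 * hand-written x86 asm, then the libgcc __sync functions when explicitly
 * forced at configure time.
 */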
/* 64-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64((int64_t)x, (int64_t *)p));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p));
}
#elif (defined(__amd64__) || defined(__x86_64__))
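/*
 * No advertised __sync builtins, but on x86-64 the lock-prefixed xadd
 * instruction provides fetch-and-add directly.
 */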
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t = x;

    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    /*
     * xadd leaves the pre-add value in t; return the new value so that all
     * backends agree with __sync_add_and_fetch() semantics.
     */
    return (t + x);
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{
    uint64_t t = (uint64_t)(-(int64_t)x);

    asm volatile (
        "lock; xaddq %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    /* t holds the pre-subtract value; return the new value. */
    return (t - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8))
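/*
 * Configure-time escape hatch: JE_FORCE_SYNC_COMPARE_AND_SWAP_8 requests the
 * libgcc __sync functions even though the compiler does not inline them (the
 * "latter case" described at the top of this file).
 */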
JEMALLOC_INLINE uint64_t
atomic_add_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint64_t
atomic_sub_uint64(uint64_t *p, uint64_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#else
# if (LG_SIZEOF_PTR == 3)
#  error "Missing implementation for 64-bit atomic operations"
# endif
#endif
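/*
 * The #error above fires only when pointers are 64 bits wide
 * (LG_SIZEOF_PTR == 3); builds with 32-bit pointers are assumed not to need
 * the 64-bit atomics.
 */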

/******************************************************************************/
/* 32-bit operations. */
#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#elif (defined(JEMALLOC_OSATOMIC))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32((int32_t)x, (int32_t *)p));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p));
}
#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t = x;

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    /* xadd leaves the pre-add value in t; return the new value. */
    return (t + x);
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{
    uint32_t t = (uint32_t)(-(int32_t)x);

    asm volatile (
        "lock; xaddl %0, %1;"
        : "+r" (t), "=m" (*p) /* Outputs. */
        : "m" (*p) /* Inputs. */
        );

    /* t holds the pre-subtract value; return the new value. */
    return (t - x);
}
#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4))
JEMALLOC_INLINE uint32_t
atomic_add_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_add_and_fetch(p, x));
}

JEMALLOC_INLINE uint32_t
atomic_sub_uint32(uint32_t *p, uint32_t x)
{

    return (__sync_sub_and_fetch(p, x));
}
#else
# error "Missing implementation for 32-bit atomic operations"
#endif

/******************************************************************************/
/* size_t operations. */
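/*
 * LG_SIZEOF_PTR is the configure-time log2 of sizeof(void *); it selects the
 * fixed-width primitive matching size_t (8 bytes when == 3, 4 when == 2).
 */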
JEMALLOC_INLINE size_t
atomic_add_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}
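
/*
 * Subtraction is implemented as addition of the two's complement negation,
 * so only the fetch-and-add primitives above are needed.
 */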
JEMALLOC_INLINE size_t
atomic_sub_z(size_t *p, size_t x)
{

#if (LG_SIZEOF_PTR == 3)
    return ((size_t)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_PTR == 2)
    return ((size_t)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}

/******************************************************************************/
/* unsigned operations. */
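/*
 * LG_SIZEOF_INT plays the same role for unsigned as LG_SIZEOF_PTR does for
 * size_t above.
 */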
JEMALLOC_INLINE unsigned
atomic_add_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x));
#endif
}

JEMALLOC_INLINE unsigned
atomic_sub_u(unsigned *p, unsigned x)
{

#if (LG_SIZEOF_INT == 3)
    return ((unsigned)atomic_add_uint64((uint64_t *)p,
        (uint64_t)-((int64_t)x)));
#elif (LG_SIZEOF_INT == 2)
    return ((unsigned)atomic_add_uint32((uint32_t *)p,
        (uint32_t)-((int32_t)x)));
#endif
}
/******************************************************************************/
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/