Add atomic operations tests and fix latent bugs.

Author: Jason Evans
Date:   2014-08-06 23:36:19 -07:00
Parent: 7f944aa621
Commit: a2ea54c986
3 changed files with 128 additions and 13 deletions

Makefile.in

@@ -110,7 +110,8 @@ C_TESTLIB_SRCS := $(srcroot)test/src/math.c $(srcroot)test/src/mtx.c \
 	$(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
 	$(srcroot)test/src/thd.c
 C_UTIL_INTEGRATION_SRCS := $(srcroot)src/util.c
-TESTS_UNIT := $(srcroot)test/unit/bitmap.c \
+TESTS_UNIT := $(srcroot)test/unit/atomic.c \
+	$(srcroot)test/unit/bitmap.c \
 	$(srcroot)test/unit/ckh.c \
 	$(srcroot)test/unit/hash.c \
 	$(srcroot)test/unit/junk.c \

include/jemalloc/internal/atomic.h

@@ -18,6 +18,17 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_INLINES
 
+/*
+ * All functions return the arithmetic result of the atomic operation. Some
+ * atomic operation APIs return the value prior to mutation, in which case the
+ * following functions must redundantly compute the result so that it can be
+ * returned. These functions are normally inlined, so the extra operations can
+ * be optimized away if the return values aren't used by the callers.
+ *
+ *   <t> atomic_add_<t>(<t> *p, <t> x) { return (*p + x); }
+ *   <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p - x); }
+ */
+
 #ifndef JEMALLOC_ENABLE_INLINE
 uint64_t atomic_add_uint64(uint64_t *p, uint64_t x);
 uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x);
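
The latent bugs fixed in the hunks below all trace back to this contract: Windows' InterlockedExchangeAdd/InterlockedExchangeAdd64 and x86's xadd return the value prior to mutation, while these wrappers promise the arithmetic result. A minimal standalone sketch of the mismatch, using a hypothetical fetch_add helper as a stand-in for any return-the-old-value primitive (not a jemalloc API):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for a fetch-and-add primitive such as
 * InterlockedExchangeAdd64: it stores *p + x but returns the OLD *p.
 */
static uint64_t
fetch_add(uint64_t *p, uint64_t x)
{
	uint64_t old = *p;

	*p = old + x;
	return (old);
}

int
main(void)
{
	uint64_t v;

	v = 5;
	/* Buggy wrapper: forwards the primitive's return value (5). */
	uint64_t buggy = fetch_add(&v, 3);

	v = 5;
	/* Fixed wrapper: redundantly re-applies x to get the result (8). */
	uint64_t fixed = fetch_add(&v, 3) + 3;

	printf("buggy=%" PRIu64 " fixed=%" PRIu64 "\n", buggy, fixed);
	return (0);
}

Because the wrappers are inlined, the redundant + x / - x folds away whenever a caller ignores the return value, which is why the comment calls the extra operations free in practice.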
@@ -52,14 +63,14 @@ JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
 
-	return (InterlockedExchangeAdd64(p, x));
+	return (InterlockedExchangeAdd64(p, x) + x);
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
 
-	return (InterlockedExchangeAdd64(p, -((int64_t)x)));
+	return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x);
 }
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint64_t
@@ -79,28 +90,31 @@ atomic_sub_uint64(uint64_t *p, uint64_t x)
 JEMALLOC_INLINE uint64_t
 atomic_add_uint64(uint64_t *p, uint64_t x)
 {
+	uint64_t t = x;
 
 	asm volatile (
 	    "lock; xaddq %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+	    : "+r" (t), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
 
-	return (x);
+	return (t + x);
 }
 
 JEMALLOC_INLINE uint64_t
 atomic_sub_uint64(uint64_t *p, uint64_t x)
 {
+	uint64_t t;
 
 	x = (uint64_t)(-(int64_t)x);
+	t = x;
 	asm volatile (
 	    "lock; xaddq %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+	    : "+r" (t), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
 
-	return (x);
+	return (t + x);
 }
 # elif (defined(JEMALLOC_ATOMIC9))
 JEMALLOC_INLINE uint64_t
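
The asm fixes follow the same pattern, with one extra subtlety: lock; xadd exchanges its register operand with the memory operand while adding, so after the instruction the register (t above) holds the pre-add *p. Subtraction is done by adding the two's-complement negation, so t + x is the correct result in both functions: for atomic_sub_uint64(&p, 3) with p == 5, x becomes (uint64_t)-3, xadd leaves t == 5 and p == 2, and t + x wraps around to 2. A single-threaded model of the instruction's operand effects (a hypothetical helper, for illustration only):

#include <assert.h>
#include <stdint.h>

/*
 * Models the operand effects of "lock; xaddq %0, %1", ignoring atomicity:
 * *p gets the sum of both operands, and *reg gets the old *p.
 */
static void
xadd_model(uint64_t *reg, uint64_t *p)
{
	uint64_t old = *p;

	*p = old + *reg;
	*reg = old;
}

int
main(void)
{
	uint64_t p = 5;
	uint64_t x = (uint64_t)(-(int64_t)3);	/* two's-complement -3 */
	uint64_t t = x;

	xadd_model(&t, &p);	/* as in the fixed atomic_sub_uint64 */
	assert(p == 2);		/* 5 - 3 */
	assert(t + x == 2);	/* t == 5 (old *p); t + x wraps to 2 */
	return (0);
}

Copying x into t also matters because xadd clobbers its register operand; the old code returned that clobbered register, i.e. the pre-mutation value, which is exactly the latent bug.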
@@ -164,14 +178,14 @@ JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
 
-	return (InterlockedExchangeAdd(p, x));
+	return (InterlockedExchangeAdd(p, x) + x);
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
 
-	return (InterlockedExchangeAdd(p, -((int32_t)x)));
+	return (InterlockedExchangeAdd(p, -((int32_t)x)) - x);
 }
 #elif (defined(JEMALLOC_OSATOMIC))
 JEMALLOC_INLINE uint32_t
@@ -191,28 +205,31 @@ atomic_sub_uint32(uint32_t *p, uint32_t x)
 JEMALLOC_INLINE uint32_t
 atomic_add_uint32(uint32_t *p, uint32_t x)
 {
+	uint32_t t = x;
 
 	asm volatile (
 	    "lock; xaddl %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+	    : "+r" (t), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
 
-	return (x);
+	return (t + x);
 }
 
 JEMALLOC_INLINE uint32_t
 atomic_sub_uint32(uint32_t *p, uint32_t x)
 {
+	uint32_t t;
 
 	x = (uint32_t)(-(int32_t)x);
+	t = x;
 	asm volatile (
 	    "lock; xaddl %0, %1;"
-	    : "+r" (x), "=m" (*p) /* Outputs. */
+	    : "+r" (t), "=m" (*p) /* Outputs. */
 	    : "m" (*p) /* Inputs. */
 	    );
 
-	return (x);
+	return (t + x);
 }
 #elif (defined(JEMALLOC_ATOMIC9))
 JEMALLOC_INLINE uint32_t

test/unit/atomic.c (new file)

@@ -0,0 +1,97 @@
#include "test/jemalloc_test.h"

#define TEST_STRUCT(p, t) \
struct p##_test_s { \
	t accum0; \
	t x; \
}; \
typedef struct p##_test_s p##_test_t;

#define TEST_BODY(p, t, PRI) do { \
	const p##_test_t tests[] = { \
		{-1, -1}, \
		{-1, 0}, \
		{-1, 1}, \
\
		{ 0, -1}, \
		{ 0, 0}, \
		{ 0, 1}, \
\
		{ 1, -1}, \
		{ 1, 0}, \
		{ 1, 1}, \
\
		{0, -(1 << 22)}, \
		{0, (1 << 22)}, \
		{(1 << 22), -(1 << 22)}, \
		{(1 << 22), (1 << 22)} \
	}; \
	unsigned i; \
\
	for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \
		t accum = tests[i].accum0; \
		assert_u64_eq(atomic_read_##p(&accum), tests[i].accum0, \
		    "i=%u", i); \
		assert_u64_eq(atomic_add_##p(&accum, tests[i].x), \
		    tests[i].accum0 + tests[i].x, \
		    "i=%u, accum=%#"PRI", x=%#"PRI, \
		    i, tests[i].accum0, tests[i].x); \
		assert_u64_eq(atomic_read_##p(&accum), accum, \
		    "i=%u", i); \
\
		accum = tests[i].accum0; \
		assert_u64_eq(atomic_sub_##p(&accum, tests[i].x), \
		    tests[i].accum0 - tests[i].x, \
		    "i=%u, accum=%#"PRI", x=%#"PRI, \
		    i, tests[i].accum0, tests[i].x); \
		assert_u64_eq(atomic_read_##p(&accum), accum, \
		    "i=%u", i); \
	} \
} while (0)

TEST_STRUCT(uint64, uint64_t)
TEST_BEGIN(test_atomic_uint64)
{
#if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3)
	test_skip("64-bit atomic operations not supported");
#else
	TEST_BODY(uint64, uint64_t, PRIx64);
#endif
}
TEST_END

TEST_STRUCT(uint32, uint32_t)
TEST_BEGIN(test_atomic_uint32)
{
	TEST_BODY(uint32, uint32_t, PRIx32);
}
TEST_END

TEST_STRUCT(z, size_t)
TEST_BEGIN(test_atomic_z)
{
	TEST_BODY(z, size_t, "zx");
}
TEST_END

TEST_STRUCT(u, unsigned)
TEST_BEGIN(test_atomic_u)
{
	TEST_BODY(u, unsigned, "x");
}
TEST_END

int
main(void)
{
	return (test(
	    test_atomic_uint64,
	    test_atomic_uint32,
	    test_atomic_z,
	    test_atomic_u));
}
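
For readers unpacking the macro machinery: the tests are generated per type by token pasting. Hand-expanded and slightly simplified (the real TEST_BODY also verifies atomic_read after every mutation and includes accum/x in the failure messages), TEST_STRUCT(uint64, uint64_t) plus the core of TEST_BODY come out to roughly:

/* TEST_STRUCT(uint64, uint64_t) pastes the prefix into a table-entry type. */
struct uint64_test_s {
	uint64_t accum0;	/* starting accumulator value */
	uint64_t x;		/* operand passed to add/sub */
};
typedef struct uint64_test_s uint64_test_t;

/*
 * Core of TEST_BODY(uint64, uint64_t, PRIx64) as it lands inside
 * test_atomic_uint64; tests[] is the table literal from the macro above.
 * Each entry checks that add/sub return the post-mutation value, which is
 * exactly what the InterlockedExchangeAdd/xadd wrappers used to get wrong.
 */
const uint64_test_t tests[] = {{-1, -1}, {-1, 0}, /* ... */ {(1 << 22), (1 << 22)}};
unsigned i;

for (i = 0; i < sizeof(tests)/sizeof(uint64_test_t); i++) {
	uint64_t accum = tests[i].accum0;

	assert_u64_eq(atomic_add_uint64(&accum, tests[i].x),
	    tests[i].accum0 + tests[i].x, "i=%u", i);

	accum = tests[i].accum0;
	assert_u64_eq(atomic_sub_uint64(&accum, tests[i].x),
	    tests[i].accum0 - tests[i].x, "i=%u", i);
}

Note that the table stores signed constants such as -1 in unsigned fields, deliberately exercising two's-complement wraparound; since assert_u64_eq compares both sides after promotion to uint64_t, the expected and actual values wrap consistently for every tested width.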