Convert prng module to use C11-style atomics
commit 7da04a6b09
parent 492e9f301e
@@ -166,7 +166,7 @@ struct arena_s {
 	 *
 	 * Synchronization: atomic.
 	 */
-	size_t offset_state;
+	atomic_zu_t offset_state;
 
 	/*
 	 * Extent serial number generator state.
@@ -6,14 +6,15 @@ uint32_t prng_state_next_u32(uint32_t state);
 uint64_t prng_state_next_u64(uint64_t state);
 size_t prng_state_next_zu(size_t state);
 
-uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range,
+uint32_t prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range,
     bool atomic);
 uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range);
-size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic);
+size_t prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic);
 
-uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic);
+uint32_t prng_range_u32(atomic_u32_t *state, uint32_t range,
+    bool atomic);
 uint64_t prng_range_u64(uint64_t *state, uint64_t range);
-size_t prng_range_zu(size_t *state, size_t range, bool atomic);
+size_t prng_range_zu(atomic_zu_t *state, size_t range, bool atomic);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_))
@@ -39,22 +40,22 @@ prng_state_next_zu(size_t state) {
 }
 
 JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) {
-	uint32_t ret, state1;
+prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
+	uint32_t ret, state0, state1;
 
 	assert(lg_range > 0);
 	assert(lg_range <= 32);
 
-	if (atomic) {
-		uint32_t state0;
+	state0 = atomic_load_u32(state, ATOMIC_RELAXED);
 
+	if (atomic) {
 		do {
-			state0 = atomic_read_u32(state);
 			state1 = prng_state_next_u32(state0);
-		} while (atomic_cas_u32(state, state0, state1));
+		} while (!atomic_compare_exchange_weak_u32(state, &state0,
+		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
 	} else {
-		state1 = prng_state_next_u32(*state);
-		*state = state1;
+		state1 = prng_state_next_u32(state0);
+		atomic_store_u32(state, state1, ATOMIC_RELAXED);
 	}
 	ret = state1 >> (32 - lg_range);
 
@@ -77,22 +78,22 @@ prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) {
-	size_t ret, state1;
+prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
+	size_t ret, state0, state1;
 
 	assert(lg_range > 0);
 	assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
 
-	if (atomic) {
-		size_t state0;
+	state0 = atomic_load_zu(state, ATOMIC_RELAXED);
 
+	if (atomic) {
 		do {
-			state0 = atomic_read_zu(state);
 			state1 = prng_state_next_zu(state0);
-		} while (atomic_cas_zu(state, state0, state1));
+		} while (!atomic_compare_exchange_weak_zu(state, &state0,
+		    state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
 	} else {
-		state1 = prng_state_next_zu(*state);
-		*state = state1;
+		state1 = prng_state_next_zu(state0);
+		atomic_store_zu(state, state1, ATOMIC_RELAXED);
 	}
 	ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
 
@@ -100,7 +101,7 @@ prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) {
 }
 
 JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(uint32_t *state, uint32_t range, bool atomic) {
+prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
 	uint32_t ret;
 	unsigned lg_range;
 
@@ -136,7 +137,7 @@ prng_range_u64(uint64_t *state, uint64_t range) {
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(size_t *state, size_t range, bool atomic) {
+prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
 	size_t ret;
 	unsigned lg_range;
 
@@ -1863,8 +1863,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 		 * cost of test repeatability. For debug builds, instead use a
 		 * deterministic seed.
 		 */
-		arena->offset_state = config_debug ? ind :
-		    (size_t)(uintptr_t)arena;
+		atomic_store_zu(&arena->offset_state, config_debug ? ind :
+		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
 	}
 
 	arena->extent_sn_next = 0;
@@ -2,31 +2,32 @@
 
 static void
 test_prng_lg_range_u32(bool atomic) {
-	uint32_t sa, sb, ra, rb;
+	atomic_u32_t sa, sb;
+	uint32_t ra, rb;
 	unsigned lg_range;
 
-	sa = 42;
+	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
 	ra = prng_lg_range_u32(&sa, 32, atomic);
-	sa = 42;
+	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
 	rb = prng_lg_range_u32(&sa, 32, atomic);
 	assert_u32_eq(ra, rb,
 	    "Repeated generation should produce repeated results");
 
-	sb = 42;
+	atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
 	rb = prng_lg_range_u32(&sb, 32, atomic);
 	assert_u32_eq(ra, rb,
 	    "Equivalent generation should produce equivalent results");
 
-	sa = 42;
+	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
 	ra = prng_lg_range_u32(&sa, 32, atomic);
 	rb = prng_lg_range_u32(&sa, 32, atomic);
 	assert_u32_ne(ra, rb,
 	    "Full-width results must not immediately repeat");
 
-	sa = 42;
+	atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
 	ra = prng_lg_range_u32(&sa, 32, atomic);
 	for (lg_range = 31; lg_range > 0; lg_range--) {
-		sb = 42;
+		atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
 		rb = prng_lg_range_u32(&sb, lg_range, atomic);
 		assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
 		    0, "High order bits should be 0, lg_range=%u", lg_range);
@@ -74,32 +75,33 @@ test_prng_lg_range_u64(void) {
 
 static void
 test_prng_lg_range_zu(bool atomic) {
-	size_t sa, sb, ra, rb;
+	atomic_zu_t sa, sb;
+	size_t ra, rb;
 	unsigned lg_range;
 
-	sa = 42;
+	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
 	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
-	sa = 42;
+	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
 	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
 	assert_zu_eq(ra, rb,
 	    "Repeated generation should produce repeated results");
 
-	sb = 42;
+	atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
 	rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
 	assert_zu_eq(ra, rb,
 	    "Equivalent generation should produce equivalent results");
 
-	sa = 42;
+	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
 	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
 	rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
 	assert_zu_ne(ra, rb,
 	    "Full-width results must not immediately repeat");
 
-	sa = 42;
+	atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
 	ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
 	for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
 	    lg_range--) {
-		sb = 42;
+		atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
 		rb = prng_lg_range_zu(&sb, lg_range, atomic);
 		assert_zu_eq((rb & (SIZE_T_MAX << lg_range)),
 		    0, "High order bits should be 0, lg_range=%u", lg_range);
@@ -142,10 +144,10 @@ test_prng_range_u32(bool atomic) {
 #define NREPS 10
 
 	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		uint32_t s;
+		atomic_u32_t s;
 		unsigned rep;
 
-		s = range;
+		atomic_store_u32(&s, range, ATOMIC_RELAXED);
 		for (rep = 0; rep < NREPS; rep++) {
 			uint32_t r = prng_range_u32(&s, range, atomic);
 
@@ -182,10 +184,10 @@ test_prng_range_zu(bool atomic) {
 #define NREPS 10
 
 	for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
-		size_t s;
+		atomic_zu_t s;
 		unsigned rep;
 
-		s = range;
+		atomic_store_zu(&s, range, ATOMIC_RELAXED);
 		for (rep = 0; rep < NREPS; rep++) {
 			size_t r = prng_range_zu(&s, range, atomic);
 
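
For reference, a minimal standalone sketch of the retry idiom the diff adopts, written directly against C11 <stdatomic.h> rather than jemalloc's atomic_u32_t / atomic_load_u32 / atomic_compare_exchange_weak_u32 wrappers. The function names and LCG constants below are illustrative placeholders, not jemalloc's.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Placeholder LCG standing in for a prng_state_next-style step function. */
static uint32_t
next_state(uint32_t state) {
	return state * 1664525u + 1013904223u;
}

static uint32_t
lg_range_u32(_Atomic uint32_t *state, unsigned lg_range, bool atomic) {
	uint32_t state0, state1;

	assert(lg_range > 0 && lg_range <= 32);

	state0 = atomic_load_explicit(state, memory_order_relaxed);
	if (atomic) {
		/*
		 * Retry until the CAS succeeds; on failure,
		 * compare_exchange_weak reloads the current value into state0.
		 */
		do {
			state1 = next_state(state0);
		} while (!atomic_compare_exchange_weak_explicit(state, &state0,
		    state1, memory_order_relaxed, memory_order_relaxed));
	} else {
		/* Caller guarantees exclusive access; a relaxed store suffices. */
		state1 = next_state(state0);
		atomic_store_explicit(state, state1, memory_order_relaxed);
	}
	return state1 >> (32 - lg_range);
}

Relaxed ordering suffices here because only the PRNG state word itself needs to stay consistent; no other memory is published through it.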