PRNG: Remove atomic functionality.

These had no uses and complicated the API.  As a rule we now expect to use only
thread-local randomization, for contention-reduction reasons, so we were paying
the API costs without ever getting the functionality benefits.
This commit is contained in:
David Goldblatt
2020-08-14 09:17:11 -07:00
committed by David Goldblatt
parent 0513047170
commit 9e6aa77ab9
2 changed files with 71 additions and 94 deletions

View File

@@ -1,34 +1,34 @@
#include "test/jemalloc_test.h"
static void
test_prng_lg_range_u32(bool atomic) {
atomic_u32_t sa, sb;
test_prng_lg_range_u32() {
uint32_t sa, sb;
uint32_t ra, rb;
unsigned lg_range;
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_u32(&sa, 32, atomic);
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
rb = prng_lg_range_u32(&sa, 32, atomic);
sa = 42;
ra = prng_lg_range_u32(&sa, 32);
sa = 42;
rb = prng_lg_range_u32(&sa, 32);
expect_u32_eq(ra, rb,
"Repeated generation should produce repeated results");
atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_u32(&sb, 32, atomic);
sb = 42;
rb = prng_lg_range_u32(&sb, 32);
expect_u32_eq(ra, rb,
"Equivalent generation should produce equivalent results");
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_u32(&sa, 32, atomic);
rb = prng_lg_range_u32(&sa, 32, atomic);
sa = 42;
ra = prng_lg_range_u32(&sa, 32);
rb = prng_lg_range_u32(&sa, 32);
expect_u32_ne(ra, rb,
"Full-width results must not immediately repeat");
atomic_store_u32(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_u32(&sa, 32, atomic);
sa = 42;
ra = prng_lg_range_u32(&sa, 32);
for (lg_range = 31; lg_range > 0; lg_range--) {
atomic_store_u32(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_u32(&sb, lg_range, atomic);
sb = 42;
rb = prng_lg_range_u32(&sb, lg_range);
expect_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
expect_u32_eq(rb, (ra >> (32 - lg_range)),
@@ -74,35 +74,35 @@ test_prng_lg_range_u64(void) {
}
static void
test_prng_lg_range_zu(bool atomic) {
atomic_zu_t sa, sb;
test_prng_lg_range_zu() {
size_t sa, sb;
size_t ra, rb;
unsigned lg_range;
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
sa = 42;
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
sa = 42;
rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
expect_zu_eq(ra, rb,
"Repeated generation should produce repeated results");
atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
sb = 42;
rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR));
expect_zu_eq(ra, rb,
"Equivalent generation should produce equivalent results");
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
sa = 42;
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
expect_zu_ne(ra, rb,
"Full-width results must not immediately repeat");
atomic_store_zu(&sa, 42, ATOMIC_RELAXED);
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic);
sa = 42;
ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR));
for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0;
lg_range--) {
atomic_store_zu(&sb, 42, ATOMIC_RELAXED);
rb = prng_lg_range_zu(&sb, lg_range, atomic);
sb = 42;
rb = prng_lg_range_zu(&sb, lg_range);
expect_zu_eq((rb & (SIZE_T_MAX << lg_range)),
0, "High order bits should be 0, lg_range=%u", lg_range);
expect_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) -
@@ -112,12 +112,12 @@ test_prng_lg_range_zu(bool atomic) {
}
TEST_BEGIN(test_prng_lg_range_u32_nonatomic) {
test_prng_lg_range_u32(false);
test_prng_lg_range_u32();
}
TEST_END
TEST_BEGIN(test_prng_lg_range_u32_atomic) {
test_prng_lg_range_u32(true);
test_prng_lg_range_u32();
}
TEST_END
@@ -127,29 +127,29 @@ TEST_BEGIN(test_prng_lg_range_u64_nonatomic) {
TEST_END
TEST_BEGIN(test_prng_lg_range_zu_nonatomic) {
test_prng_lg_range_zu(false);
test_prng_lg_range_zu();
}
TEST_END
TEST_BEGIN(test_prng_lg_range_zu_atomic) {
test_prng_lg_range_zu(true);
test_prng_lg_range_zu();
}
TEST_END
static void
test_prng_range_u32(bool atomic) {
test_prng_range_u32() {
uint32_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
#define NREPS 10
for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
atomic_u32_t s;
uint32_t s;
unsigned rep;
atomic_store_u32(&s, range, ATOMIC_RELAXED);
s = range;
for (rep = 0; rep < NREPS; rep++) {
uint32_t r = prng_range_u32(&s, range, atomic);
uint32_t r = prng_range_u32(&s, range);
expect_u32_lt(r, range, "Out of range");
}
@@ -177,19 +177,19 @@ test_prng_range_u64(void) {
}
static void
test_prng_range_zu(bool atomic) {
test_prng_range_zu() {
size_t range;
#define MAX_RANGE 10000000
#define RANGE_STEP 97
#define NREPS 10
for (range = 2; range < MAX_RANGE; range += RANGE_STEP) {
atomic_zu_t s;
size_t s;
unsigned rep;
atomic_store_zu(&s, range, ATOMIC_RELAXED);
s = range;
for (rep = 0; rep < NREPS; rep++) {
size_t r = prng_range_zu(&s, range, atomic);
size_t r = prng_range_zu(&s, range);
expect_zu_lt(r, range, "Out of range");
}