TSD: Add the ability to enter a global slow path.
This gives any thread the ability to send other threads down slow paths the next time they fetch tsd.
committed by David Goldblatt
parent feff510b9f, commit e870829e64

test/unit/tsd.c | 130
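For orientation, a minimal usage sketch of the API this test exercises. The names tsd_fetch, tsd_fast, tsd_tsdn, tsd_global_slow_inc, and tsd_global_slow_dec, and the nesting (counter) semantics, are all taken from the diff below; the wrapper function and its name are illustrative only.

#include "test/jemalloc_test.h"

/* Hypothetical wrapper, for illustration only. */
static void
force_all_threads_slow_briefly(void) {
	tsd_t *tsd = tsd_fetch();
	/* Every thread falls off the fast path at its next tsd fetch. */
	tsd_global_slow_inc(tsd_tsdn(tsd));
	/* ... work that requires all threads to be on the slow path ... */
	tsd_global_slow_dec(tsd_tsdn(tsd));
	/*
	 * Threads may become fast again (if they ever were) at their next
	 * fetch. inc/dec nest like a counter: after two incs, a single dec
	 * still leaves everything slow.
	 */
}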
@@ -1,5 +1,10 @@
 #include "test/jemalloc_test.h"
 
+/*
+ * If we're e.g. in debug mode, we *never* enter the fast path, and so shouldn't
+ * be asserting that we're on one.
+ */
+static bool originally_fast;
 static int data_cleanup_count;
 
 void
@@ -124,6 +129,128 @@ TEST_BEGIN(test_tsd_reincarnation) {
 }
 TEST_END
 
+typedef struct {
+	atomic_u32_t phase;
+	atomic_b_t error;
+} global_slow_data_t;
+
+static void *
+thd_start_global_slow(void *arg) {
+	/* PHASE 0 */
+	global_slow_data_t *data = (global_slow_data_t *)arg;
+	free(mallocx(1, 0));
+
+	tsd_t *tsd = tsd_fetch();
+	/*
+	 * No global slowness has happened yet; there was an error if we were
+	 * originally fast but aren't now.
+	 */
+	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+	    ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 1, ATOMIC_SEQ_CST);
+
+	/* PHASE 2 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 2) {
+	}
+	free(mallocx(1, 0));
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 3, ATOMIC_SEQ_CST);
+
+	/* PHASE 4 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 4) {
+	}
+	free(mallocx(1, 0));
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 5, ATOMIC_SEQ_CST);
+
+	/* PHASE 6 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 6) {
+	}
+	free(mallocx(1, 0));
+	/* Only one decrement so far. */
+	atomic_store_b(&data->error, tsd_fast(tsd), ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 7, ATOMIC_SEQ_CST);
+
+	/* PHASE 8 */
+	while (atomic_load_u32(&data->phase, ATOMIC_SEQ_CST) != 8) {
+	}
+	free(mallocx(1, 0));
+	/*
+	 * Both decrements happened; we should be fast again (if we ever
+	 * were).
+	 */
+	atomic_store_b(&data->error, originally_fast && !tsd_fast(tsd),
+	    ATOMIC_SEQ_CST);
+	atomic_store_u32(&data->phase, 9, ATOMIC_SEQ_CST);
+
+	return NULL;
+}
+
+TEST_BEGIN(test_tsd_global_slow) {
+	global_slow_data_t data = {ATOMIC_INIT(0), ATOMIC_INIT(false)};
+	/*
+	 * Note that the "mallocx" here (vs. malloc) is important, since the
+	 * compiler is allowed to optimize away free(malloc(1)) but not
+	 * free(mallocx(1)).
+	 */
+	free(mallocx(1, 0));
+	tsd_t *tsd = tsd_fetch();
+	originally_fast = tsd_fast(tsd);
+
+	thd_t thd;
+	thd_create(&thd, thd_start_global_slow, (void *)&data);
+	/* PHASE 1 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 1) {
+		/*
+		 * We don't have a portable condvar/semaphore mechanism.
+		 * Spin-wait.
+		 */
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_inc(tsd_tsdn(tsd));
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+	atomic_store_u32(&data.phase, 2, ATOMIC_SEQ_CST);
+
+	/* PHASE 3 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 3) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	/* Increase again, so that we can test multiple fast/slow changes. */
+	tsd_global_slow_inc(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 4, ATOMIC_SEQ_CST);
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+
+	/* PHASE 5 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 5) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_dec(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 6, ATOMIC_SEQ_CST);
+	/* We only decreased once; things should still be slow. */
+	free(mallocx(1, 0));
+	assert_false(tsd_fast(tsd), "");
+
+	/* PHASE 7 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 7) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+	tsd_global_slow_dec(tsd_tsdn(tsd));
+	atomic_store_u32(&data.phase, 8, ATOMIC_SEQ_CST);
+	/* We incremented and then decremented twice; we should be fast now. */
+	free(mallocx(1, 0));
+	assert_true(!originally_fast || tsd_fast(tsd), "");
+
+	/* PHASE 9 */
+	while (atomic_load_u32(&data.phase, ATOMIC_SEQ_CST) != 9) {
+	}
+	assert_false(atomic_load_b(&data.error, ATOMIC_SEQ_CST), "");
+
+	thd_join(thd, NULL);
+}
+TEST_END
+
 int
 main(void) {
 	/* Ensure tsd bootstrapped. */
@@ -135,5 +262,6 @@ main(void) {
 	return test_no_reentrancy(
 	    test_tsd_main_thread,
 	    test_tsd_sub_thread,
-	    test_tsd_reincarnation);
+	    test_tsd_reincarnation,
+	    test_tsd_global_slow);
 }
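On the mallocx-vs-malloc comment in the test: GCC and Clang treat malloc/free as known built-ins and may elide a free(malloc(1)) pair entirely under optimization, while the jemalloc-specific mallocx cannot be elided, so free(mallocx(1, 0)) reliably forces a trip through the allocator and thus a tsd fetch. A standalone sketch of the distinction, assuming jemalloc is installed with its public header:

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	/* May be deleted wholesale at -O2; malloc/free are built-ins. */
	free(malloc(1));
	/* Must actually run; mallocx is not a recognized built-in. */
	free(mallocx(1, 0));
	return 0;
}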
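The test's synchronization is a lock-step phase handshake: one atomic counter, with each thread spinning until the counter reaches a phase it owns, doing its checks, then bumping the counter (per the comment in PHASE 1, spin-waiting stands in for a portable condvar/semaphore). A minimal standalone sketch of the same pattern using C11 atomics and pthreads; the jemalloc test uses the tree's own atomic_u32_t/thd_t wrappers instead:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Even phases belong to the worker, odd phases to main. */
static atomic_uint phase;

static void
wait_for(unsigned p) {
	/* Spin-wait until the other side hands us phase p. */
	while (atomic_load_explicit(&phase, memory_order_seq_cst) != p) {
	}
}

static void *
worker(void *arg) {
	(void)arg;
	wait_for(0); /* trivially true; phase starts at 0 */
	printf("worker: phase 0 work\n");
	atomic_store_explicit(&phase, 1, memory_order_seq_cst);

	wait_for(2);
	printf("worker: phase 2 work\n");
	atomic_store_explicit(&phase, 3, memory_order_seq_cst);
	return NULL;
}

int
main(void) {
	pthread_t thd;
	pthread_create(&thd, NULL, worker, NULL);

	wait_for(1);
	printf("main: phase 1 work\n");
	atomic_store_explicit(&phase, 2, memory_order_seq_cst);

	wait_for(3);
	pthread_join(thd, NULL);
	return 0;
}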