Remove thread_event_rollback()

This commit is contained in:
Yinan Zhang 2020-03-09 17:05:06 -07:00
parent ba783b3a0f
commit a5780598b3
3 changed files with 1 additions and 67 deletions

View File

@@ -33,7 +33,6 @@ typedef struct te_ctx_s {
void te_assert_invariants_debug(tsd_t *tsd);
void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx, bool delay_event);
-void te_alloc_rollback(tsd_t *tsd, size_t diff);
void te_event_update(tsd_t *tsd, bool alloc_event);
void te_recompute_fast_threshold(tsd_t *tsd);
void tsd_te_init(tsd_t *tsd);

View File

@@ -320,55 +320,6 @@ te_event_trigger(tsd_t *tsd, te_ctx_t *ctx, bool delay_event) {
	te_assert_invariants(tsd);
}
/*
 * Roll back the thread's allocated-bytes counter by diff, keeping the
 * alloc-side event state consistent.  Only alloc events are touched (see
 * the te_ctx_get(..., true) call below).  If the rollback rewinds past
 * last_event, each pending alloc-event wait is pushed forward by the
 * crossed distance, clamped at TE_MAX_START_WAIT.
 */
void
te_alloc_rollback(tsd_t *tsd, size_t diff) {
te_assert_invariants(tsd);
/* Nothing to undo. */
if (diff == 0U) {
return;
}
/* Rollback happens only on alloc events. */
te_ctx_t ctx;
te_ctx_get(tsd, &ctx, true);
uint64_t thread_allocated = te_ctx_current_bytes_get(&ctx);
/* The subtraction is intentionally susceptible to underflow. */
uint64_t thread_allocated_rollback = thread_allocated - diff;
te_ctx_current_bytes_set(&ctx, thread_allocated_rollback);
uint64_t last_event = te_ctx_last_event_get(&ctx);
/* Both subtractions are intentionally susceptible to underflow. */
if (thread_allocated_rollback - last_event <=
thread_allocated - last_event) {
/*
 * The rollback did not cross last_event (the modular distance
 * from last_event did not grow), so the existing event state
 * remains valid as-is.
 */
te_assert_invariants(tsd);
return;
}
/*
 * The rollback crossed last_event: rewind last_event to the rolled-back
 * counter, then grow each pending alloc-event wait by the crossed
 * amount (wait_diff) so triggers still fire at the intended points.
 * NOTE: no comments may be placed inside the E macro below -- its lines
 * are joined by backslash continuations.
 */
te_ctx_last_event_set(&ctx, thread_allocated_rollback);
/* The subtraction is intentionally susceptible to underflow. */
uint64_t wait_diff = last_event - thread_allocated_rollback;
assert(wait_diff <= diff);
#define E(event, condition, alloc_event) \
if (alloc_event == true && condition) { \
uint64_t event_wait = event##_event_wait_get(tsd); \
assert(event_wait <= TE_MAX_START_WAIT); \
if (event_wait > 0U) { \
if (wait_diff > TE_MAX_START_WAIT - event_wait) {\
event_wait = TE_MAX_START_WAIT; \
} else { \
event_wait += wait_diff; \
} \
assert(event_wait <= TE_MAX_START_WAIT); \
event##_event_wait_set(tsd, event_wait); \
} \
}
ITERATE_OVER_ALL_EVENTS
#undef E
/* Refresh derived alloc-event state from the updated waits. */
te_event_update(tsd, true);
}
void
te_event_update(tsd_t *tsd, bool is_alloc) {
	te_ctx_t ctx;

View File

@@ -27,24 +27,8 @@ TEST_BEGIN(test_next_event_fast) {
}
TEST_END
TEST_BEGIN(test_event_rollback) {
	tsd_t *tsd = tsd_fetch();
	const uint64_t diff = TE_MAX_INTERVAL >> 2;
	uint64_t before = thread_allocated_get(tsd);
	/* Repeatedly roll back and check the counter dropped by exactly diff. */
	for (size_t i = 0; i < 10; i++) {
		te_alloc_rollback(tsd, diff);
		uint64_t after = thread_allocated_get(tsd);
		assert_u64_eq(before - after, diff,
		    "thread event counters are not properly rolled back");
		before = after;
	}
}
TEST_END
int
main(void) {
	return test(
-	    test_next_event_fast,
-	    test_event_rollback);
+	    test_next_event_fast);
}