Add surplus reading API for thread event lookahead

Yinan Zhang 2020-03-12 15:24:36 -07:00
parent f805468957
commit c6f59e9bb4


@@ -226,12 +226,56 @@ te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) {
	}
}
/*
 * The lookahead functionality allows the caller to determine, without
 * touching the event counters, whether an event would be triggered.  The
 * event counters are not advanced until the end of the allocation /
 * deallocation calls, so the lookahead is useful when preparation work for
 * an event must be done early in the allocation / deallocation calls.
 *
 * Currently only the profiling sampling event needs the lookahead
 * functionality, so we don't yet define general purpose lookahead functions.
 *
 * Surplus refers to the number of bytes beyond what is needed to trigger an
 * event, which can be a useful quantity to have in general when the
 * lookahead is performed.
 */
JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
    size_t *surplus) {
	if (surplus != NULL) {
		/*
		 * This is a dead store: the surplus will be overwritten before
		 * any read.  The initialization suppresses compiler warnings.
		 * Meanwhile, using SIZE_MAX to initialize is good for
		 * debugging purposes, because a valid surplus value is
		 * strictly less than usize, which is at most SIZE_MAX.
		 */
		*surplus = SIZE_MAX;
	}
	if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) {
		return false;
	}
	/* The subtraction is intentionally susceptible to underflow. */
	uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
	    tsd_thread_allocated_last_event_get(tsd);
	uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
	if (accumbytes < sample_wait) {
		return false;
	}
	assert(accumbytes - sample_wait < (uint64_t)usize);
	if (surplus != NULL) {
		*surplus = (size_t)(accumbytes - sample_wait);
	}
	return true;
}
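
To make the surplus arithmetic concrete, here is an illustrative standalone sketch; the counter values are made up and merely stand in for the tsd reads above. It shows that the deliberately underflow-tolerant subtraction still recovers the accumulated byte count modulo 2^64 even after the thread_allocated counter has wrapped.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
	/* Made-up counter values standing in for the tsd reads above. */
	uint64_t sample_wait = 100;		/* bytes until the next sample event */
	size_t usize = 64;			/* usable size of the pending allocation */
	uint64_t last_event = UINT64_MAX - 15;	/* counter at the last event */
	uint64_t thread_allocated = 64;		/* 80 bytes later, modulo 2^64 */

	/* The subtraction wraps, yet still yields 80 + usize = 144 bytes. */
	uint64_t accumbytes = thread_allocated + usize - last_event;
	assert(accumbytes == 80 + usize);

	if (accumbytes >= sample_wait) {
		/* 144 - 100 = 44 bytes beyond the trigger point. */
		size_t surplus = (size_t)(accumbytes - sample_wait);
		printf("event triggers, surplus = %zu\n", surplus);
	}
	return 0;
}
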
JEMALLOC_ALWAYS_INLINE bool
te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) {
	assert(usize == sz_s2u(usize));
-	return tsd_thread_allocated_get(tsd) + usize -
-	    tsd_thread_allocated_last_event_get(tsd) >=
-	    tsd_prof_sample_event_wait_get(tsd);
+	return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL);
}
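
As an example of how a caller might use the lookahead early in the allocation path, before the event counters are advanced, consider the following hypothetical wrapper; the wrapper name is made up and is not part of jemalloc, while sz_s2u and the lookahead function are the ones used above.

/*
 * Hypothetical caller sketch: check whether the pending allocation would
 * trigger a prof sample, and by how many bytes, so that any sampling
 * preparation can be done before the event counters are advanced.
 */
JEMALLOC_ALWAYS_INLINE bool
would_trigger_prof_sample(tsd_t *tsd, size_t size, size_t *surplus) {
	/*
	 * sz_s2u rounds the request up to the usable size that will be
	 * charged against the thread's allocated-bytes counter.
	 */
	size_t usize = sz_s2u(size);
	return te_prof_sample_event_lookahead_surplus(tsd, usize, surplus);
}
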
JEMALLOC_ALWAYS_INLINE void