#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H
#define JEMALLOC_INTERNAL_THREAD_EVENT_H

#include "jemalloc/internal/tsd.h"

/*
 * Maximum threshold on thread_allocated_next_event_fast, so that there is no
 * need to check overflow in malloc fast path. (The allocation size in malloc
 * fast path never exceeds SC_LOOKUP_MAXCLASS.)
 */
#define THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX \
    (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)

/*
 * The max interval helps make sure that malloc stays on the fast path in the
 * common case, i.e. thread_allocated < thread_allocated_next_event_fast.
 * When thread_allocated is within an event's distance to
 * THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX above, thread_allocated_next_event_fast
 * is wrapped around and we fall back to the medium-fast path. The max interval
 * makes sure that we're not staying on the fallback case for too long, even if
 * there's no active event or if all active events have long wait times.
 */
#define THREAD_EVENT_MAX_INTERVAL ((uint64_t)(4U << 20))

void thread_event_assert_invariants_debug(tsd_t *tsd);
void thread_event_trigger(tsd_t *tsd, bool delay_event);
void thread_event_rollback(tsd_t *tsd, size_t diff);
void thread_event_update(tsd_t *tsd);
void thread_event_boot(void);
void thread_event_recompute_fast_threshold(tsd_t *tsd);
void tsd_thread_event_init(tsd_t *tsd);

/*
 * List of all events, in the following format:
 *  E(event, (condition))
 */
#define ITERATE_OVER_ALL_EVENTS \
    E(tcache_gc, (TCACHE_GC_INCR_BYTES > 0)) \
    E(prof_sample, (config_prof && opt_prof)) \
    E(stats_interval, (opt_stats_interval >= 0))

#define E(event, condition) \
    C(event##_event_wait)

/* List of all thread event counters. */
#define ITERATE_OVER_ALL_COUNTERS \
    C(thread_allocated) \
    C(thread_allocated_last_event) \
    ITERATE_OVER_ALL_EVENTS \
    C(prof_sample_last_event) \
    C(stats_interval_last_event)

/* Getters directly wrap TSD getters. */
#define C(counter) \
JEMALLOC_ALWAYS_INLINE uint64_t \
counter##_get(tsd_t *tsd) { \
        return tsd_##counter##_get(tsd); \
}

ITERATE_OVER_ALL_COUNTERS
#undef C

/*
 * Setters call the TSD pointer getters rather than the TSD setters, so that
 * the counters can be modified even when TSD state is reincarnated or
 * minimal_initialized: if an event is triggered in such cases, we will
 * temporarily delay the event and let it be immediately triggered at the next
 * allocation call.
 */
#define C(counter) \
JEMALLOC_ALWAYS_INLINE void \
counter##_set(tsd_t *tsd, uint64_t v) { \
        *tsd_##counter##p_get(tsd) = v; \
}

ITERATE_OVER_ALL_COUNTERS
#undef C

/*
 * E above was only needed to generate the _event_wait getter / setter
 * functions for each individual event; discard it here.
 */
#undef E

/*
 * Two malloc fastpath getters -- use the unsafe getters since tsd may be
 * non-nominal, in which case the fast_threshold will be set to 0. This allows
 * checking for events and tsd non-nominal in a single branch.
 *
 * Note that these can only be used on the fastpath.
 */
JEMALLOC_ALWAYS_INLINE uint64_t
thread_allocated_malloc_fastpath(tsd_t *tsd) {
        return *tsd_thread_allocatedp_get_unsafe(tsd);
}

JEMALLOC_ALWAYS_INLINE uint64_t
thread_allocated_next_event_malloc_fastpath(tsd_t *tsd) {
        uint64_t v = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
        assert(v <= THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX);
        return v;
}
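
/*
 * Illustrative sketch only (not part of this header): a malloc fast path
 * caller is expected to fold the event check and the tsd non-nominal check
 * into a single branch using the two getters above, roughly:
 *
 *     uint64_t allocated = thread_allocated_malloc_fastpath(tsd);
 *     uint64_t threshold = thread_allocated_next_event_malloc_fastpath(tsd);
 *     if (unlikely(allocated + usize >= threshold)) {
 *             ... an event is due, or tsd is non-nominal (threshold == 0);
 *             ... take the slow path ...
 *     }
 *
 * Here usize and the slow-path handling are placeholders; the actual fast
 * path logic lives in the malloc entry points, not in this header.
 */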

/* The three functions below are for next_event_fast. */
JEMALLOC_ALWAYS_INLINE uint64_t
thread_allocated_next_event_fast_get(tsd_t *tsd) {
        uint64_t v = tsd_thread_allocated_next_event_fast_get(tsd);
        assert(v <= THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX);
        return v;
}

JEMALLOC_ALWAYS_INLINE void
thread_allocated_next_event_fast_set(tsd_t *tsd, uint64_t v) {
        assert(v <= THREAD_ALLOCATED_NEXT_EVENT_FAST_MAX);
        *tsd_thread_allocated_next_event_fastp_get(tsd) = v;
}

JEMALLOC_ALWAYS_INLINE void
thread_allocated_next_event_fast_set_non_nominal(tsd_t *tsd) {
        /*
         * Set the fast threshold to zero when tsd is non-nominal. Use the
         * unsafe getter as this may get called during tsd init and clean up.
         */
        *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
}

/* For next_event. The setter also updates the fast threshold. */
JEMALLOC_ALWAYS_INLINE uint64_t
thread_allocated_next_event_get(tsd_t *tsd) {
        return tsd_thread_allocated_next_event_get(tsd);
}

JEMALLOC_ALWAYS_INLINE void
thread_allocated_next_event_set(tsd_t *tsd, uint64_t v) {
        *tsd_thread_allocated_next_eventp_get(tsd) = v;
        thread_event_recompute_fast_threshold(tsd);
}

/*
 * In debug mode, this function checks that the thread event counters are in a
 * consistent state; this consistency forms the invariants that we rely on,
 * and must uphold, before and after each round of thread event handling.
 * The invariants are only temporarily violated in the middle of:
 * (a) thread_event() if an event is triggered (the thread_event_trigger()
 *     call at the end will restore the invariants),
 * (b) thread_##event##_event_update() (the thread_event_update() call at the
 *     end will restore the invariants), or
 * (c) thread_event_rollback() if the rollback falls below the last_event (the
 *     thread_event_update() call at the end will restore the invariants).
 */
JEMALLOC_ALWAYS_INLINE void
thread_event_assert_invariants(tsd_t *tsd) {
        if (config_debug) {
                thread_event_assert_invariants_debug(tsd);
        }
}

JEMALLOC_ALWAYS_INLINE void
thread_event(tsd_t *tsd, size_t usize) {
        thread_event_assert_invariants(tsd);

        uint64_t thread_allocated_before = thread_allocated_get(tsd);
        thread_allocated_set(tsd, thread_allocated_before + usize);

        /* The subtraction is intentionally susceptible to underflow. */
        if (likely(usize < thread_allocated_next_event_get(tsd) -
            thread_allocated_before)) {
                thread_event_assert_invariants(tsd);
        } else {
                thread_event_trigger(tsd, false);
        }
}

#define E(event, condition) \
JEMALLOC_ALWAYS_INLINE void \
thread_##event##_event_update(tsd_t *tsd, uint64_t event_wait) { \
        thread_event_assert_invariants(tsd); \
        assert(condition); \
        assert(tsd_nominal(tsd)); \
        assert(tsd_reentrancy_level_get(tsd) == 0); \
        assert(event_wait > 0U); \
        if (THREAD_EVENT_MIN_START_WAIT > 1U && \
            unlikely(event_wait < THREAD_EVENT_MIN_START_WAIT)) { \
                event_wait = THREAD_EVENT_MIN_START_WAIT; \
        } \
        if (THREAD_EVENT_MAX_START_WAIT < UINT64_MAX && \
            unlikely(event_wait > THREAD_EVENT_MAX_START_WAIT)) { \
                event_wait = THREAD_EVENT_MAX_START_WAIT; \
        } \
        event##_event_wait_set(tsd, event_wait); \
        thread_event_update(tsd); \
}

ITERATE_OVER_ALL_EVENTS
#undef E

#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */
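
/*
 * For reference (illustration only): the ITERATE_OVER_ALL_EVENTS expansion
 * above generates one updater per event -- thread_tcache_gc_event_update(),
 * thread_prof_sample_event_update() and thread_stats_interval_event_update().
 * For example, the prof_sample instance expands to the equivalent of the
 * following (with the start-wait clamping elided):
 *
 *     JEMALLOC_ALWAYS_INLINE void
 *     thread_prof_sample_event_update(tsd_t *tsd, uint64_t event_wait) {
 *             thread_event_assert_invariants(tsd);
 *             assert(config_prof && opt_prof);
 *             assert(tsd_nominal(tsd));
 *             assert(tsd_reentrancy_level_get(tsd) == 0);
 *             assert(event_wait > 0U);
 *             prof_sample_event_wait_set(tsd, event_wait);
 *             thread_event_update(tsd);
 *     }
 */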