#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#include "jemalloc/internal/mutex.h"

static inline bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
    uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing.  This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}
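
/*
 * Usage sketch (not part of the original file): a caller that credits
 * allocated bytes to the accumulator would typically trigger an interval dump
 * when prof_accum_add() above reports overflow, along the lines of
 *
 *	if (prof_accum_add(tsdn, &arena->prof_accum, accumbytes)) {
 *		prof_idump(tsdn);
 *	}
 *
 * The arena field name and the prof_idump() call site are assumptions for
 * illustration; only the overflow return contract is defined above.
 */
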
static inline void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
    size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing.  Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
	do {
		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}
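
/*
 * Worked example for prof_accum_cancel() above (illustrative, not from the
 * original source): when a sampled small allocation of usize bytes is backed
 * by a large extent, the accumulator is credited SC_LARGE_MINCLASS rather
 * than usize.  With a hypothetical SC_LARGE_MINCLASS of 16384 and usize of
 * 4096, the excess canceled here is 16384 - 4096 = 12288 bytes, clamped at
 * zero so the accumulator never underflows.  The exact call site and the
 * value of SC_LARGE_MINCLASS are configuration-dependent assumptions.
 */
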
JEMALLOC_ALWAYS_INLINE bool
prof_active_get_unlocked(void) {
	/*
	 * Even if opt_prof is true, sampling can be temporarily disabled by
	 * setting prof_active to false.  No locking is used when reading
	 * prof_active in the fast path, so there are no guarantees regarding
	 * how long it will take for all threads to notice state changes.
	 */
	return prof_active;
}
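
/*
 * Illustrative fast-path check (not from the original source): sampling code
 * would typically consult prof_active_get_unlocked() before doing any
 * per-allocation bookkeeping, e.g.
 *
 *	if (prof_active_get_unlocked() && sample_due) {
 *		// take a backtrace and record the sample
 *	}
 *
 * The sample_due condition and the bookkeeping are assumptions for
 * illustration; the only guarantee above is an unsynchronized, possibly stale
 * read of prof_active.
 */
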
#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */