Convert arena->prof_accumbytes synchronization to atomics.
include/jemalloc/internal/prof_inlines_a.h	(new file, 76 lines)
@@ -0,0 +1,76 @@
#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
#define JEMALLOC_INTERNAL_PROF_INLINES_A_H

#ifndef JEMALLOC_ENABLE_INLINE
bool prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
    uint64_t accumbytes);
void prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
JEMALLOC_INLINE bool
prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) {
	cassert(config_prof);

	bool overflow;
	uint64_t a0, a1;

	/*
	 * If the application allocates fast enough (and/or if idump is slow
	 * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
	 * idump trigger coalescing. This is an intentional mechanism that
	 * avoids rate-limiting allocation.
	 */
#ifdef JEMALLOC_ATOMIC_U64
	do {
		a0 = atomic_read_u64(&prof_accum->accumbytes);
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= prof_interval);
		if (overflow) {
			a1 %= prof_interval;
		}
	} while (atomic_cas_u64(&prof_accum->accumbytes, a0, a1));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = a0 + accumbytes;
	overflow = (a1 >= prof_interval);
	if (overflow) {
		a1 %= prof_interval;
	}
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
	return overflow;
}

JEMALLOC_INLINE void
prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) {
	cassert(config_prof);

	/*
	 * Cancel out as much of the excessive prof_accumbytes increase as
	 * possible without underflowing. Interval-triggered dumps occur
	 * slightly more often than intended as a result of incomplete
	 * canceling.
	 */
	uint64_t a0, a1;
#ifdef JEMALLOC_ATOMIC_U64
	do {
		a0 = atomic_read_u64(&prof_accum->accumbytes);
		a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
		    usize) : 0;
	} while (atomic_cas_u64(&prof_accum->accumbytes, a0, a1));
#else
	malloc_mutex_lock(tsdn, &prof_accum->mtx);
	a0 = prof_accum->accumbytes;
	a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
	    0;
	prof_accum->accumbytes = a1;
	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif
}
#endif

#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
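The JEMALLOC_ATOMIC_U64 path in prof_accum_add is a classic compare-and-swap retry loop: read the counter, compute the wrapped successor, and retry until no other thread has won the race in between. Below is a minimal stand-alone model of the same accumulate-and-wrap pattern, written against C11 <stdatomic.h> rather than jemalloc's internal atomic wrappers; the names MODEL_INTERVAL, model_accum, and model_accum_add are hypothetical stand-ins for prof_interval, prof_accum->accumbytes, and prof_accum_add.

/* Stand-alone sketch of the lock-free accumulate-and-wrap pattern;
 * not jemalloc's actual atomic API. */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MODEL_INTERVAL (((uint64_t)1) << 20)	/* stand-in for prof_interval */

static _Atomic uint64_t model_accum = 0;

/* Returns true iff this call pushed the counter past the interval. */
static bool
model_accum_add(uint64_t accumbytes) {
	uint64_t a0, a1;
	bool overflow;

	a0 = atomic_load_explicit(&model_accum, memory_order_relaxed);
	do {
		a1 = a0 + accumbytes;
		assert(a1 >= a0);
		overflow = (a1 >= MODEL_INTERVAL);
		if (overflow) {
			/* Keep the remainder, so back-to-back overflows
			 * coalesce instead of stacking up. */
			a1 %= MODEL_INTERVAL;
		}
		/* On failure, a0 is reloaded with the current value. */
	} while (!atomic_compare_exchange_weak_explicit(&model_accum, &a0,
	    a1, memory_order_relaxed, memory_order_relaxed));
	return overflow;
}

One convention difference worth noting: jemalloc's atomic_cas_u64 in the diff returns true when the CAS fails (hence "while (atomic_cas_u64(...))" retries directly), whereas the C11 compare-exchange functions return true on success, so the loop condition above is negated.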
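The boolean returned by prof_accum_add is what drives interval-triggered heap dumps: whichever thread pushes the accumulator past prof_interval is responsible for the dump. A sketch of that call pattern, assuming the arena field introduced by this commit is named prof_accum (the wrapper caller_accum itself is hypothetical):

/* Hypothetical wrapper: account for an allocation and trigger an
 * interval dump if this thread caused the counter to wrap. */
static void
caller_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
	if (prof_accum_add(tsdn, &arena->prof_accum, accumbytes)) {
		prof_idump(tsdn);
	}
}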