Do not rollback prof idump counter in arena_prof_promote()

Yinan Zhang
2020-04-15 11:08:25 -07:00
parent 0295aa38a2
commit 039bfd4e30
6 changed files with 1 addition and 93 deletions

include/jemalloc/internal/counter.h

@@ -51,31 +51,6 @@ counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
 	return overflow;
 }
 
-JEMALLOC_ALWAYS_INLINE void
-counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
-	/*
-	 * Cancel out as much of the excessive accumbytes increase as possible
-	 * without underflowing. Interval-triggered events occur slightly more
-	 * often than intended as a result of incomplete canceling.
-	 */
-	uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&counter->accumbytes,
-	    ATOMIC_RELAXED);
-	do {
-		a1 = (a0 >= bytes) ? a0 - bytes : 0;
-	} while (!atomic_compare_exchange_weak_u64(
-	    &counter->accumbytes, &a0, a1, ATOMIC_RELAXED,
-	    ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &counter->mtx);
-	a0 = counter->accumbytes;
-	a1 = (a0 >= bytes) ? a0 - bytes : 0;
-	counter->accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &counter->mtx);
-#endif
-}
-
 bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
 
 #endif /* JEMALLOC_INTERNAL_COUNTER_H */
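
The removed counter_rollback() is a saturating subtraction on an atomic counter: a CAS retry loop that cancels as much of the over-accumulation as possible without wrapping below zero. Below is a minimal standalone sketch of the same pattern, using C11 <stdatomic.h> in place of jemalloc's atomic_u64 wrappers; the names rollback() and accum are illustrative, not jemalloc's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Saturating rollback: subtract up to `bytes`, never wrapping below zero. */
static void
rollback(_Atomic uint64_t *accumbytes, uint64_t bytes) {
	uint64_t a0 = atomic_load_explicit(accumbytes, memory_order_relaxed);
	uint64_t a1;
	do {
		a1 = (a0 >= bytes) ? a0 - bytes : 0;
		/* On CAS failure, a0 is reloaded and a1 recomputed. */
	} while (!atomic_compare_exchange_weak_explicit(accumbytes, &a0, a1,
	    memory_order_relaxed, memory_order_relaxed));
}

int
main(void) {
	_Atomic uint64_t accum = 100;
	rollback(&accum, 40);    /* 100 -> 60 */
	rollback(&accum, 1000);  /* would underflow; clamps to 0 */
	printf("%llu\n", (unsigned long long)
	    atomic_load_explicit(&accum, memory_order_relaxed));
	return 0;
}

The weak compare-exchange may fail spuriously or lose a race with a concurrent update, in which case the loop recomputes the clamped value against the freshly observed counter; the relaxed ordering mirrors the ATOMIC_RELAXED used in the original.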

include/jemalloc/internal/prof_externs.h

@@ -50,7 +50,6 @@ extern bool prof_booted;
 
 /* Functions only accessed in prof_inlines_a.h */
 bool prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes);
-void prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize);
 
 /* Functions only accessed in prof_inlines_b.h */
 prof_tdata_t *prof_tdata_init(tsd_t *tsd);

include/jemalloc/internal/prof_inlines_a.h

@@ -36,15 +36,4 @@ prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes) {
 	return prof_idump_accum_impl(tsdn, accumbytes);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-prof_idump_rollback(tsdn_t *tsdn, size_t usize) {
-	cassert(config_prof);
-
-	if (prof_interval == 0 || !prof_active_get_unlocked()) {
-		return;
-	}
-
-	prof_idump_rollback_impl(tsdn, usize);
-}
-
 #endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
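
The comment deleted from counter_rollback() above warns that canceling is incomplete: if an interval trigger has already consumed the over-accumulated bytes, there is nothing left to subtract, so interval events fire slightly more often than intended. A toy, single-threaded illustration of that effect (not jemalloc code; the interval and byte counts are invented):

#include <stdint.h>
#include <stdio.h>

#define INTERVAL 100 /* hypothetical idump interval, in bytes */

static uint64_t accum;

/* Accumulate `bytes`; consume INTERVAL and report a trigger on overflow. */
static int
accum_bytes(uint64_t bytes) {
	accum += bytes;
	if (accum >= INTERVAL) {
		accum -= INTERVAL;
		return 1;
	}
	return 0;
}

/* Saturating rollback, as in the removed counter_rollback(). */
static void
rollback_bytes(uint64_t bytes) {
	accum = (accum >= bytes) ? accum - bytes : 0;
}

int
main(void) {
	/* A promotion over-accumulates: 90 intended bytes plus 30 extra. */
	int fired = accum_bytes(120);  /* trigger fires; accum is now 20 */
	/* The rollback can only cancel what is left. */
	rollback_bytes(30);            /* clamps at 0; 10 bytes uncanceled */
	printf("fired=%d accum=%llu\n", fired, (unsigned long long)accum);
	/* The dump fired after only 90 intended bytes: slightly early. */
	return 0;
}

This commit drops the rollback path entirely, so the excess bytes accumulated by a promotion simply remain counted, the same slight over-triggering the removed comment already tolerated.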