Do not rollback prof idump counter in arena_prof_promote()

Yinan Zhang 2020-04-15 11:08:25 -07:00
parent 0295aa38a2
commit 039bfd4e30
6 changed files with 1 addition and 93 deletions
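Read against the hunks below, the change amounts to this: a sampled small allocation is promoted to the smallest large size class, so the idump byte counter is credited with SC_LARGE_MINCLASS up front, and the deleted rollback then subtracted the overshoot so the counter tracked the requested usize. A minimal sketch of that arithmetic (plain C, not jemalloc source; names mirror the diff, and the credit-at-allocation step is assumed from it):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the prof_idump_accumulated counter in the diff. */
static uint64_t idump_accumbytes = 0;

/*
 * Net effect on the counter when a sampled small allocation of `usize`
 * bytes is promoted to the smallest large size class.
 */
static void
promote_sampled(size_t usize, size_t sc_large_minclass, bool old_behavior) {
	idump_accumbytes += sc_large_minclass;	/* credited at allocation */
	if (old_behavior) {
		/* Deleted rollback: cancel the overshoot, net += usize. */
		idump_accumbytes -= sc_large_minclass - usize;
	}
	/*
	 * After this commit: net += sc_large_minclass, so interval dumps
	 * can trigger somewhat earlier for sampled small allocations.
	 */
}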

include/jemalloc/internal/counter.h

@@ -51,31 +51,6 @@ counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t accumbytes) {
 	return overflow;
 }
 
-JEMALLOC_ALWAYS_INLINE void
-counter_rollback(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
-	/*
-	 * Cancel out as much of the excessive accumbytes increase as possible
-	 * without underflowing. Interval-triggered events occur slightly more
-	 * often than intended as a result of incomplete canceling.
-	 */
-	uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
-	a0 = atomic_load_u64(&counter->accumbytes,
-	    ATOMIC_RELAXED);
-	do {
-		a1 = (a0 >= bytes) ? a0 - bytes : 0;
-	} while (!atomic_compare_exchange_weak_u64(
-	    &counter->accumbytes, &a0, a1, ATOMIC_RELAXED,
-	    ATOMIC_RELAXED));
-#else
-	malloc_mutex_lock(tsdn, &counter->mtx);
-	a0 = counter->accumbytes;
-	a1 = (a0 >= bytes) ? a0 - bytes : 0;
-	counter->accumbytes = a1;
-	malloc_mutex_unlock(tsdn, &counter->mtx);
-#endif
-}
-
 bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
 
 #endif /* JEMALLOC_INTERNAL_COUNTER_H */
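Beyond the behavior change, the deleted helper is a compact instance of the lock-free clamped-subtract pattern. A standalone C11 rendering with plain <stdatomic.h> in place of jemalloc's atomic wrappers (a sketch, not the project's code):

#include <stdatomic.h>
#include <stdint.h>

/*
 * Clamped (saturating) subtract on an atomic counter, mirroring the
 * deleted counter_rollback(): never underflows, but any amount that
 * cannot be canceled is silently dropped.
 */
static void
rollback(_Atomic uint64_t *accumbytes, uint64_t bytes) {
	uint64_t a0 = atomic_load_explicit(accumbytes, memory_order_relaxed);
	uint64_t a1;
	do {
		a1 = (a0 >= bytes) ? a0 - bytes : 0;
	} while (!atomic_compare_exchange_weak_explicit(accumbytes, &a0, a1,
	    memory_order_relaxed, memory_order_relaxed));
}

The clamp to zero is the "incomplete canceling" the deleted comment mentions: whatever part of `bytes` an interval trigger already consumed cannot be handed back, so the next trigger fires slightly early.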

include/jemalloc/internal/prof_externs.h

@@ -50,7 +50,6 @@ extern bool prof_booted;
 
 /* Functions only accessed in prof_inlines_a.h */
 bool prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes);
-void prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize);
 
 /* Functions only accessed in prof_inlines_b.h */
 prof_tdata_t *prof_tdata_init(tsd_t *tsd);

include/jemalloc/internal/prof_inlines_a.h

@@ -36,15 +36,4 @@ prof_idump_accum(tsdn_t *tsdn, uint64_t accumbytes) {
 	return prof_idump_accum_impl(tsdn, accumbytes);
 }
 
-JEMALLOC_ALWAYS_INLINE void
-prof_idump_rollback(tsdn_t *tsdn, size_t usize) {
-	cassert(config_prof);
-
-	if (prof_interval == 0 || !prof_active_get_unlocked()) {
-		return;
-	}
-
-	prof_idump_rollback_impl(tsdn, usize);
-}
-
 #endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */

src/arena.c

@@ -1061,8 +1061,6 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
 	edata_szind_set(edata, szind);
 	emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
 
-	prof_idump_rollback(tsdn, usize);
-
 	assert(isalloc(tsdn, ptr) == usize);
 }
 

src/prof.c

@@ -50,7 +50,7 @@ bool opt_prof_accum = false;
 char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
 bool opt_prof_experimental_use_sys_thread_name = false;
 
-/* Accessed via prof_idump_[accum/rollback](). */
+/* Accessed via prof_idump_accum(). */
 static counter_accum_t prof_idump_accumulated;
 
 /*
@@ -655,16 +655,6 @@ prof_idump_accum_impl(tsdn_t *tsdn, uint64_t accumbytes) {
 	return counter_accum(tsdn, &prof_idump_accumulated, accumbytes);
 }
 
-void
-prof_idump_rollback_impl(tsdn_t *tsdn, size_t usize) {
-	cassert(config_prof);
-
-	/* Rollback is only done on arena_prof_promote of small sizes. */
-	assert(SC_LARGE_MINCLASS > usize);
-	return counter_rollback(tsdn, &prof_idump_accumulated,
-	    SC_LARGE_MINCLASS - usize);
-}
-
 bool
 prof_dump_prefix_set(tsdn_t *tsdn, const char *prefix) {
 	cassert(config_prof);
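The deleted implementation credits back exactly the promotion overshoot, and the assert pins the invariant that keeps that amount positive. Worked with hypothetical numbers (SC_LARGE_MINCLASS is configuration-dependent; 16384 here is only an assumption):

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void) {
	/* Hypothetical values, for illustration only. */
	uint64_t sc_large_minclass = 16384;	/* smallest large size class */
	uint64_t usize = 40;			/* sampled small request */

	assert(sc_large_minclass > usize);	/* the deleted assert's invariant */
	/* 16384 - 40 = 16344 bytes were credited back per sampled allocation. */
	printf("%" PRIu64 "\n", sc_large_minclass - usize);
	return 0;
}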

test/unit/counter.c

@@ -36,48 +36,6 @@ expect_counter_value(counter_accum_t *c, uint64_t v) {
 	expect_u64_eq(accum, v, "Counter value mismatch");
 }
 
-TEST_BEGIN(test_counter_rollback) {
-	uint64_t half_interval = interval / 2;
-	counter_accum_t c;
-	counter_accum_init(&c, interval);
-
-	tsd_t *tsd = tsd_fetch();
-	counter_rollback(tsd_tsdn(tsd), &c, half_interval);
-
-	bool trigger;
-
-	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
-	expect_b_eq(trigger, false, "Should not trigger");
-	counter_rollback(tsd_tsdn(tsd), &c, half_interval + 1);
-	expect_counter_value(&c, 0);
-
-	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
-	expect_b_eq(trigger, false, "Should not trigger");
-	counter_rollback(tsd_tsdn(tsd), &c, half_interval - 1);
-	expect_counter_value(&c, 1);
-
-	counter_rollback(tsd_tsdn(tsd), &c, 1);
-	expect_counter_value(&c, 0);
-
-	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
-	expect_b_eq(trigger, false, "Should not trigger");
-	counter_rollback(tsd_tsdn(tsd), &c, 1);
-	expect_counter_value(&c, half_interval - 1);
-
-	trigger = counter_accum(tsd_tsdn(tsd), &c, half_interval);
-	expect_b_eq(trigger, false, "Should not trigger");
-	expect_counter_value(&c, interval - 1);
-
-	trigger = counter_accum(tsd_tsdn(tsd), &c, 1);
-	expect_b_eq(trigger, true, "Should have triggered");
-	expect_counter_value(&c, 0);
-
-	trigger = counter_accum(tsd_tsdn(tsd), &c, interval + 1);
-	expect_b_eq(trigger, true, "Should have triggered");
-	expect_counter_value(&c, 1);
-}
-TEST_END
-
 #define N_THDS (16)
 #define N_ITER_THD (1 << 12)
 #define ITER_INCREMENT (interval >> 4)
@@ -123,6 +81,5 @@ int
 main(void) {
 	return test(
 	    test_counter_accum,
-	    test_counter_rollback,
 	    test_counter_mt);
 }
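What survives is an accumulate-and-trigger interface only. A hedged usage sketch against the declarations shown above (the interval value and event body are placeholders, and error handling is elided):

/* Sketch: driving a periodic event off the remaining counter API. */
static counter_accum_t dump_counter;

static void
module_init(void) {
	/* Return value (init failure) ignored in this sketch. */
	counter_accum_init(&dump_counter, (uint64_t)1 << 20);	/* 1 MiB interval */
}

static void
on_alloc(tsdn_t *tsdn, size_t usize) {
	if (counter_accum(tsdn, &dump_counter, usize)) {
		/* Interval crossed: fire the event (e.g. an idump). */
	}
}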