Convert arena->prof_accumbytes synchronization to atomics.

This commit is contained in:
Jason Evans
2017-02-12 17:03:46 -08:00
parent b779522b9b
commit fa2d64c94b
15 changed files with 128 additions and 59 deletions

View File

@@ -1148,19 +1148,7 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
extent_usize_set(extent, usize);
/*
* Cancel out as much of the excessive prof_accumbytes increase as
* possible without underflowing. Interval-triggered dumps occur
* slightly more often than intended as a result of incomplete
* canceling.
*/
malloc_mutex_lock(tsdn, &arena->lock);
if (arena->prof_accumbytes >= LARGE_MINCLASS - usize) {
arena->prof_accumbytes -= LARGE_MINCLASS - usize;
} else {
arena->prof_accumbytes = 0;
}
malloc_mutex_unlock(tsdn, &arena->lock);
prof_accum_cancel(tsdn, &arena->prof_accum, usize);
assert(isalloc(tsdn, extent, ptr) == usize);
}
@@ -1574,7 +1562,9 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
if (config_prof) {
arena->prof_accumbytes = 0;
if (prof_accum_init(tsdn, &arena->prof_accum)) {
goto label_error;
}
}
if (config_cache_oblivious) {

View File

@@ -1753,6 +1753,20 @@ prof_fdump(void) {
prof_dump(tsd, false, filename, opt_prof_leak);
}
/*
 * Set up a prof_accum_t for use: the byte counter starts at zero, and on
 * configurations lacking native 64-bit atomics a mutex is created to guard
 * it.  Returns true on failure (mutex initialization error), false on
 * success.  (tsdn is accepted for interface symmetry with callers.)
 */
bool
prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
	cassert(config_prof);

#ifdef JEMALLOC_ATOMIC_U64
	/* 64-bit atomic ops available: no lock is needed for accumbytes. */
#else
	/* Fall back to a mutex-protected counter. */
	if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
	    WITNESS_RANK_PROF_ACCUM)) {
		return true;
	}
#endif
	prof_accum->accumbytes = 0;
	return false;
}
void
prof_idump(tsdn_t *tsdn) {
tsd_t *tsd;

View File

@@ -200,7 +200,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
}
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
idump = arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}