Avoid arena_prof_accum()-related locking when possible.

Refactor arena_prof_accum() and its callers to avoid arena locking when
prof_interval is 0 (as when profiling is disabled).

Reported by Ben Maurer.
Jason Evans
2012-11-13 12:56:27 -08:00
parent 556ddc7fa9
commit a3b3386ddd
5 changed files with 50 additions and 36 deletions
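
The call sites below rely on two new helpers, arena_prof_accum() and
arena_prof_accum_locked(), which replace the locked arena_prof_accum() removed
from src/arena.c. Their definitions presumably live in
include/jemalloc/internal/arena.h, one of the five changed files that is not
shown in this excerpt. The following is a minimal sketch of how the split
presumably looks, reconstructed from the callers; the JEMALLOC_INLINE
qualifier, the helper name arena_prof_accum_impl, and the exact assertions are
assumptions:

JEMALLOC_INLINE void
arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);
	assert(prof_interval != 0);

	/* Caller holds arena->lock and has already checked prof_interval. */
	arena->prof_accumbytes += accumbytes;
	if (arena->prof_accumbytes >= prof_interval) {
		prof_idump();
		arena->prof_accumbytes -= prof_interval;
	}
}

JEMALLOC_INLINE void
arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	/* For callers that already hold arena->lock, e.g. arena_malloc_large(). */
	if (prof_interval == 0)
		return;
	arena_prof_accum_impl(arena, accumbytes);
}

JEMALLOC_INLINE void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	/* Skip the arena lock entirely when interval dumping is disabled. */
	if (prof_interval == 0)
		return;
	malloc_mutex_lock(&arena->lock);
	arena_prof_accum_impl(arena, accumbytes);
	malloc_mutex_unlock(&arena->lock);
}

This is also presumably why prof_interval gains an explicit initializer in
src/prof.c and the redundant prof_interval = 0 assignments in prof_boot1() go
away: the unlocked fast path depends on prof_interval being 0 whenever
interval-triggered dumping is inactive, and a single initializer makes that
invariant obvious.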

src/arena.c

@@ -1321,21 +1321,6 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 	return (arena_run_reg_alloc(bin->runcur, bin_info));
 }
 
-void
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
-{
-
-	cassert(config_prof);
-
-	if (config_prof && prof_interval != 0) {
-		arena->prof_accumbytes += accumbytes;
-		if (arena->prof_accumbytes >= prof_interval) {
-			prof_idump();
-			arena->prof_accumbytes -= prof_interval;
-		}
-	}
-}
-
 void
 arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
     uint64_t prof_accumbytes)
@@ -1347,11 +1332,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
 	assert(tbin->ncached == 0);
 
-	if (config_prof) {
-		malloc_mutex_lock(&arena->lock);
+	if (config_prof)
 		arena_prof_accum(arena, prof_accumbytes);
-		malloc_mutex_unlock(&arena->lock);
-	}
 	bin = &arena->bins[binind];
 	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
@@ -1459,11 +1441,8 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 		bin->stats.nrequests++;
 	}
 	malloc_mutex_unlock(&bin->lock);
-	if (config_prof && isthreaded == false) {
-		malloc_mutex_lock(&arena->lock);
+	if (config_prof && isthreaded == false)
 		arena_prof_accum(arena, size);
-		malloc_mutex_unlock(&arena->lock);
-	}
 
 	if (zero == false) {
 		if (config_fill) {
@@ -1507,7 +1486,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
 	}
 	if (config_prof)
-		arena_prof_accum(arena, size);
+		arena_prof_accum_locked(arena, size);
 	malloc_mutex_unlock(&arena->lock);
 
 	if (zero == false) {

src/prof.c

@@ -26,7 +26,7 @@ bool opt_prof_leak = false;
 bool		opt_prof_accum = false;
 char		opt_prof_prefix[PATH_MAX + 1];
 
-uint64_t	prof_interval;
+uint64_t	prof_interval = 0;
 bool		prof_promote;
 
 /*
@@ -1206,13 +1206,11 @@ prof_boot1(void)
 		 */
 		opt_prof = true;
 		opt_prof_gdump = false;
-		prof_interval = 0;
 	} else if (opt_prof) {
 		if (opt_lg_prof_interval >= 0) {
 			prof_interval = (((uint64_t)1U) <<
 			    opt_lg_prof_interval);
-		} else
-			prof_interval = 0;
+		}
 	}
 
 	prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);

src/tcache.c

@@ -97,9 +97,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		arena_bin_t *bin = &arena->bins[binind];
 
 		if (config_prof && arena == tcache->arena) {
-			malloc_mutex_lock(&arena->lock);
 			arena_prof_accum(arena, tcache->prof_accumbytes);
-			malloc_mutex_unlock(&arena->lock);
 			tcache->prof_accumbytes = 0;
 		}
@@ -180,7 +178,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
 		malloc_mutex_lock(&arena->lock);
 		if ((config_prof || config_stats) && arena == tcache->arena) {
 			if (config_prof) {
-				arena_prof_accum(arena,
+				arena_prof_accum_locked(arena,
 				    tcache->prof_accumbytes);
 				tcache->prof_accumbytes = 0;
 			}
@@ -343,11 +341,8 @@ tcache_destroy(tcache_t *tcache)
 		}
 	}
 
-	if (config_prof && tcache->prof_accumbytes > 0) {
-		malloc_mutex_lock(&tcache->arena->lock);
+	if (config_prof && tcache->prof_accumbytes > 0)
 		arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
-		malloc_mutex_unlock(&tcache->arena->lock);
-	}
 
 	tcache_size = arena_salloc(tcache, false);
 	if (tcache_size <= SMALL_MAXCLASS) {