Improve interval-based profile dump triggering.

When an allocation is large enough to trigger multiple dumps, use
modular math rather than subtraction to reset the interval counter.
Prior to this change, it was possible for a single allocation to cause
many subsequent allocations to all trigger profile dumps.
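
For illustration only (the wrapper names and the 512 KiB interval below are made up, not jemalloc's actual code; the real change is in the diff further down), the difference between the two reset strategies looks roughly like this:

#include <stdbool.h>
#include <stdint.h>

#define PROF_INTERVAL	((uint64_t)512 * 1024)	/* hypothetical interval */

/* Old behavior: subtract a single interval.  A 4 MiB allocation leaves
 * the counter roughly 3.5 MiB above zero, so the next several
 * allocations each cross the threshold and trigger their own dumps. */
static bool
accum_subtract(uint64_t *accumbytes, uint64_t bytes)
{
	*accumbytes += bytes;
	if (*accumbytes >= PROF_INTERVAL) {
		*accumbytes -= PROF_INTERVAL;
		return true;
	}
	return false;
}

/* New behavior: keep only the remainder, so a single allocation triggers
 * at most one dump no matter how large it is. */
static bool
accum_modular(uint64_t *accumbytes, uint64_t bytes)
{
	*accumbytes += bytes;
	if (*accumbytes >= PROF_INTERVAL) {
		*accumbytes %= PROF_INTERVAL;
		return true;
	}
	return false;
}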

When updating usable size for a sampled object, try to cancel out
the difference between LARGE_MINCLASS and usable size from the interval
counter.
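
As a rough sketch of the intent (the 16 KiB LARGE_MINCLASS value and the helper below are illustrative assumptions; the actual change is in the diff that follows): a small sampled allocation of, say, usize = 4 KiB is backed by a LARGE_MINCLASS-sized allocation, so the interval counter was credited with more bytes than the caller asked for. Subtracting LARGE_MINCLASS - usize undoes that, clamping at zero to avoid underflow.

#include <stdint.h>

#define LARGE_MINCLASS	((uint64_t)16 * 1024)	/* assumed promoted size */

/* Remove the over-credited bytes from the interval counter.  Clamping at
 * zero means the cancellation can be incomplete, which is why interval
 * dumps may still fire slightly more often than intended. */
static void
cancel_promotion_excess(uint64_t *accumbytes, uint64_t usize)
{
	uint64_t excess = LARGE_MINCLASS - usize;

	if (*accumbytes >= excess)
		*accumbytes -= excess;
	else
		*accumbytes = 0;
}
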
Jason Evans 2016-05-28 17:29:03 -07:00
parent ed2c2427a7
commit d28e5a6696
2 changed files with 15 additions and 1 deletion

@@ -984,7 +984,7 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)

 	arena->prof_accumbytes += accumbytes;
 	if (arena->prof_accumbytes >= prof_interval) {
-		arena->prof_accumbytes -= prof_interval;
+		arena->prof_accumbytes %= prof_interval;
 		return (true);
 	}
 	return (false);

@@ -2258,6 +2258,7 @@ void
 arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
     size_t usize)
 {
+	arena_t *arena = extent_arena_get(extent);

 	cassert(config_prof);
 	assert(ptr != NULL);
@@ -2266,6 +2267,19 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,

 	extent_usize_set(extent, usize);

+	/*
+	 * Cancel out as much of the excessive prof_accumbytes increase as
+	 * possible without underflowing.  Interval-triggered dumps occur
+	 * slightly more often than intended as a result of incomplete
+	 * canceling.
+	 */
+	malloc_mutex_lock(tsdn, &arena->lock);
+	if (arena->prof_accumbytes >= LARGE_MINCLASS - usize)
+		arena->prof_accumbytes -= LARGE_MINCLASS - usize;
+	else
+		arena->prof_accumbytes = 0;
+	malloc_mutex_unlock(tsdn, &arena->lock);
+
 	assert(isalloc(tsdn, extent, ptr) == usize);
 }