Improve interval-based profile dump triggering.
When an allocation is large enough to trigger multiple dumps, use modular arithmetic rather than subtraction to reset the interval counter. Prior to this change, it was possible for a single large allocation to cause many subsequent allocations to all trigger profile dumps.

When updating the usable size for a sampled object, try to cancel out the difference between LARGE_MINCLASS and the usable size from the interval counter.
parent ed2c2427a7
commit d28e5a6696
@@ -984,7 +984,7 @@ arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes)
 
 	arena->prof_accumbytes += accumbytes;
 	if (arena->prof_accumbytes >= prof_interval) {
-		arena->prof_accumbytes -= prof_interval;
+		arena->prof_accumbytes %= prof_interval;
 		return (true);
 	}
 	return (false);
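To see what this one-character change fixes, here is a standalone sketch (not jemalloc code; the interval and counters merely mimic prof_interval and prof_accumbytes above) contrasting the old subtraction reset with the new modulo reset:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for jemalloc's internals; values are arbitrary. */
static const uint64_t prof_interval = 1 << 20;	/* dump every 1 MiB */
static uint64_t accum_sub = 0;	/* counter reset by subtraction (old) */
static uint64_t accum_mod = 0;	/* counter reset by modulo (new) */

/* Old behavior: a single subtraction can leave the counter far above the
 * interval after one huge allocation. */
static int
accum_subtract(uint64_t accumbytes) {
	accum_sub += accumbytes;
	if (accum_sub >= prof_interval) {
		accum_sub -= prof_interval;
		return 1;	/* trigger a dump */
	}
	return 0;
}

/* New behavior: modulo guarantees the counter ends below the interval. */
static int
accum_modulo(uint64_t accumbytes) {
	accum_mod += accumbytes;
	if (accum_mod >= prof_interval) {
		accum_mod %= prof_interval;
		return 1;
	}
	return 0;
}

int
main(void) {
	/* An 8 MiB allocation triggers a dump under both schemes... */
	accum_subtract(8 << 20);
	accum_modulo(8 << 20);
	/* ...but with subtraction, the leftover 7 MiB keeps the counter
	 * above the interval, so every small allocation that follows
	 * triggers a dump too. */
	for (int i = 0; i < 4; i++) {
		printf("small alloc %d: subtract=%d modulo=%d\n", i,
		    accum_subtract(4096), accum_modulo(4096));
	}
	return 0;
}

With subtraction every small allocation reports a trigger; with modulo none do until another full interval of bytes accumulates.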
 src/arena.c | 14 ++++++++++++++
@@ -2258,6 +2258,7 @@ void
 arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
     size_t usize)
 {
+	arena_t *arena = extent_arena_get(extent);
 
 	cassert(config_prof);
 	assert(ptr != NULL);
@@ -2266,6 +2267,19 @@ arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
 
 	extent_usize_set(extent, usize);
 
+	/*
+	 * Cancel out as much of the excessive prof_accumbytes increase as
+	 * possible without underflowing. Interval-triggered dumps occur
+	 * slightly more often than intended as a result of incomplete
+	 * canceling.
+	 */
+	malloc_mutex_lock(tsdn, &arena->lock);
+	if (arena->prof_accumbytes >= LARGE_MINCLASS - usize)
+		arena->prof_accumbytes -= LARGE_MINCLASS - usize;
+	else
+		arena->prof_accumbytes = 0;
+	malloc_mutex_unlock(tsdn, &arena->lock);
+
 	assert(isalloc(tsdn, extent, ptr) == usize);
 }
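The promotion-time cancellation is a saturating subtraction: per the commit message, a sampled small object is backed by a LARGE_MINCLASS-sized allocation, so prof_accumbytes was credited with LARGE_MINCLASS bytes; once the object's true usable size usize is recorded, the over-credit of LARGE_MINCLASS - usize is removed, clamping at zero so the unsigned counter cannot underflow (hence the comment about dumps occurring slightly more often than intended). A minimal sketch, with an assumed LARGE_MINCLASS value and an invented cancel_promotion() helper:

#include <stdint.h>
#include <stdio.h>

#define LARGE_MINCLASS ((uint64_t)16384)	/* assumed value, for illustration */

static uint64_t prof_accumbytes;	/* illustrative copy of the arena counter */

/* Hypothetical helper mirroring the hunk above: cancel as much of the
 * LARGE_MINCLASS over-credit as possible without underflowing. */
static void
cancel_promotion(uint64_t usize) {
	if (prof_accumbytes >= LARGE_MINCLASS - usize)
		prof_accumbytes -= LARGE_MINCLASS - usize;
	else
		prof_accumbytes = 0;	/* clamp; the next dump may come slightly early */
}

int
main(void) {
	prof_accumbytes = 20000;
	cancel_promotion(8);	/* removes 16376; counter becomes 3624 */
	printf("after full cancel: %llu\n", (unsigned long long)prof_accumbytes);

	prof_accumbytes = 100;
	cancel_promotion(8);	/* over-credit exceeds counter; clamps to 0 */
	printf("after clamped cancel: %llu\n", (unsigned long long)prof_accumbytes);
	return 0;
}

The clamped branch is where cancellation is incomplete: the bytes that could not be subtracted stay counted toward the interval, which is why interval-triggered dumps can fire slightly more often than intended.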