Stop over-reporting memory usage from sampled small allocations

@interwq noticed [while reviewing an earlier PR](https://github.com/jemalloc/jemalloc/pull/2478#discussion_r1256217261)
that I had missed updating this statistics accounting to match the rest of
the changes from #2459. This is now fixed: sampled small allocations
increment the `.nmalloc`/`.ndalloc` counters of their effective bin size,
instead of over-reporting memory usage by attributing all such allocations
to `SC_LARGE_MINCLASS`.
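For context, these are the counters surfaced through jemalloc's `mallctl` stats interface (`stats.arenas.<i>.bins.<j>.nmalloc` and friends). A minimal sketch of reading one, assuming a build with stats enabled; arena 0 and bin 0 are illustrative only, since real code would map an allocation size to its bin index:

```c
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	void *p = malloc(8);	/* with profiling on, this may be sampled */

	/* Refresh jemalloc's stats snapshot before reading counters. */
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	/* Per-bin counter that sampled small allocations now increment. */
	uint64_t nmalloc;
	sz = sizeof(nmalloc);
	if (mallctl("stats.arenas.0.bins.0.nmalloc", &nmalloc, &sz,
	    NULL, 0) == 0) {
		printf("bin 0 nmalloc: %llu\n", (unsigned long long)nmalloc);
	}

	free(p);
	return 0;
}
```

Before this fix, such a sampled small allocation would instead have bumped the `stats.arenas.<i>.lextents.<j>.nmalloc` counter for the `SC_LARGE_MINCLASS` size class.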
commit 07a2eab3ed (parent ea5b7bea31)
Author: Kevin Svetlitski
Date: 2023-07-19 12:30:12 -07:00
Committed-by: Qi Wang

src/arena.c

@@ -293,34 +293,48 @@ arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
 
 static void
 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
-    szind_t index, hindex;
-
     cassert(config_stats);
 
+    szind_t index = sz_size2index(usize);
+    /* This only occurs when we have a sampled small allocation */
     if (usize < SC_LARGE_MINCLASS) {
-        usize = SC_LARGE_MINCLASS;
-    }
-    index = sz_size2index(usize);
-    hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
-
-    locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-        &arena->stats.lstats[hindex].nmalloc, 1);
+        assert(index < SC_NBINS);
+        assert(usize >= PAGE && usize % PAGE == 0);
+        bin_t *bin = arena_get_bin(arena, index, /* binshard */ 0);
+        malloc_mutex_lock(tsdn, &bin->lock);
+        bin->stats.nmalloc++;
+        malloc_mutex_unlock(tsdn, &bin->lock);
+    } else {
+        assert(index >= SC_NBINS);
+        szind_t hindex = index - SC_NBINS;
+        LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
+        locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
+            &arena->stats.lstats[hindex].nmalloc, 1);
+        LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
+    }
 }
 
 static void
 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
-    szind_t index, hindex;
-
     cassert(config_stats);
 
+    szind_t index = sz_size2index(usize);
+    /* This only occurs when we have a sampled small allocation */
     if (usize < SC_LARGE_MINCLASS) {
-        usize = SC_LARGE_MINCLASS;
-    }
-    index = sz_size2index(usize);
-    hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
-
-    locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
-        &arena->stats.lstats[hindex].ndalloc, 1);
+        assert(index < SC_NBINS);
+        assert(usize >= PAGE && usize % PAGE == 0);
+        bin_t *bin = arena_get_bin(arena, index, /* binshard */ 0);
+        malloc_mutex_lock(tsdn, &bin->lock);
+        bin->stats.ndalloc++;
+        malloc_mutex_unlock(tsdn, &bin->lock);
+    } else {
+        assert(index >= SC_NBINS);
+        szind_t hindex = index - SC_NBINS;
+        LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
+        locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
+            &arena->stats.lstats[hindex].ndalloc, 1);
+        LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
+    }
 }
 
 static void
@@ -344,9 +358,7 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 
     if (edata != NULL) {
         if (config_stats) {
-            LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
             arena_large_malloc_stats_update(tsdn, arena, usize);
-            LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
         }
     }
 
@@ -360,10 +372,8 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
 
 void
 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
     if (config_stats) {
-        LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
         arena_large_dalloc_stats_update(tsdn, arena,
             edata_usize_get(edata));
-        LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
 }
@@ -373,9 +383,7 @@ arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t usize = edata_usize_get(edata);
 
     if (config_stats) {
-        LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
         arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
-        LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
 }
@@ -385,9 +393,7 @@ arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
     size_t usize = edata_usize_get(edata);
 
     if (config_stats) {
-        LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
         arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
-        LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
     }
 }
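Note that the `LOCKEDINT_MTX` lock/unlock pairs removed from the call sites above do not simply disappear: they move into `arena_large_malloc_stats_update` and `arena_large_dalloc_stats_update` themselves, because the sampled-small path must take the owning bin's mutex rather than the arena-wide stats mutex. A generic sketch of that pattern, with hypothetical names and plain pthread mutexes standing in for jemalloc's locks:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for the bin mutex and the arena stats mutex. */
static pthread_mutex_t bin_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t bin_nmalloc;
static uint64_t large_nmalloc;

/*
 * The helper now chooses the lock guarding the counter it is about to
 * bump, so callers no longer wrap the call in the stats mutex themselves.
 */
static void
malloc_stats_update(bool sampled_small) {
	if (sampled_small) {
		/* Sampled small allocation: credit its effective bin. */
		pthread_mutex_lock(&bin_lock);
		bin_nmalloc++;
		pthread_mutex_unlock(&bin_lock);
	} else {
		/* Genuine large allocation: credit the large-class stats. */
		pthread_mutex_lock(&stats_lock);
		large_nmalloc++;
		pthread_mutex_unlock(&stats_lock);
	}
}

int
main(void) {
	malloc_stats_update(true);	/* e.g. a sampled 8-byte allocation */
	malloc_stats_update(false);	/* e.g. a 1 MiB allocation */
	return 0;
}
```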