Derive {allocated,nmalloc,ndalloc,nrequests}_large stats.
This mildly reduces stats update overhead during normal operation.
commit d433471f58
parent ab25d3c987
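In rough terms, the per-size-class counters in arena_stats_t.lstats stay as the only counters updated on the large malloc/dalloc fast paths, while the arena-wide {allocated,nmalloc,ndalloc,nrequests}_large totals are no longer maintained incrementally and are instead recomputed when stats are merged. A minimal sketch of that aggregation, assuming jemalloc's internal types and accessors exactly as they appear in the diff below (the standalone helper name is hypothetical; the actual change inlines this loop in arena_stats_merge()):

    /*
     * Hypothetical helper, for illustration only: rebuild the arena-wide
     * large-allocation totals from the per-size-class counters instead of
     * bumping both on every allocation/deallocation.
     */
    static void
    arena_large_stats_derive(tsdn_t *tsdn, arena_t *arena,
        arena_stats_t *astats, malloc_large_stats_t *lstats) {
        astats->allocated_large = 0;
        astats->nmalloc_large = 0;
        astats->ndalloc_large = 0;
        astats->nrequests_large = 0;
        for (szind_t i = 0; i < NSIZES - NBINS; i++) {
            /* Read one large size class's counters... */
            uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
                &arena->stats.lstats[i].nmalloc);
            uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
                &arena->stats.lstats[i].ndalloc);
            uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
                &arena->stats.lstats[i].nrequests);
            size_t curlextents = arena_stats_read_zu(tsdn, &arena->stats,
                &arena->stats.lstats[i].curlextents);
            /* ...merge it into the caller's per-class copy... */
            lstats[i].nmalloc += nmalloc;
            lstats[i].ndalloc += ndalloc;
            lstats[i].nrequests += nrequests;
            lstats[i].curlextents += curlextents;
            /* ...and fold it into the derived arena-wide totals. */
            astats->nmalloc_large += nmalloc;
            astats->ndalloc_large += ndalloc;
            astats->nrequests_large += nrequests;
            astats->allocated_large += curlextents * index2size(i);
        }
    }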
@@ -104,10 +104,10 @@ struct arena_stats_s {
     size_t internal;
     size_t resident; /* Derived. */
 
-    size_t allocated_large;
-    uint64_t nmalloc_large;
-    uint64_t ndalloc_large;
-    uint64_t nrequests_large;
+    size_t allocated_large; /* Derived. */
+    uint64_t nmalloc_large; /* Derived. */
+    uint64_t ndalloc_large; /* Derived. */
+    uint64_t nrequests_large; /* Derived. */
 
     /* Number of bytes cached in tcache associated with this arena. */
     size_t tcache_bytes; /* Derived. */
src/arena.c (45 changed lines)
@@ -136,8 +136,6 @@ void
 arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
     szind_t szind, uint64_t nrequests) {
     arena_stats_lock(tsdn, arena_stats);
-    arena_stats_add_u64(tsdn, arena_stats, &arena_stats->nrequests_large,
-        nrequests);
     arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
         NBINS].nrequests, nrequests);
     arena_stats_unlock(tsdn, arena_stats);
@@ -160,14 +158,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     const char **dss, ssize_t *decay_time, size_t *nactive, size_t *ndirty,
     arena_stats_t *astats, malloc_bin_stats_t *bstats,
     malloc_large_stats_t *lstats) {
-    size_t base_allocated, base_resident, base_mapped;
-    unsigned i;
-
     cassert(config_stats);
 
     arena_basic_stats_merge(tsdn, arena, nthreads, dss, decay_time,
         nactive, ndirty);
 
+    size_t base_allocated, base_resident, base_mapped;
     base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
         &base_mapped);
 
@@ -196,15 +192,30 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
     astats->nrequests_large += arena_stats_read_u64(tsdn, &arena->stats,
         &arena->stats.nrequests_large);
 
-    for (i = 0; i < NSIZES - NBINS; i++) {
-        lstats[i].nmalloc += arena_stats_read_u64(tsdn, &arena->stats,
+    astats->allocated_large = 0;
+    astats->nmalloc_large = 0;
+    astats->ndalloc_large = 0;
+    astats->nrequests_large = 0;
+    for (szind_t i = 0; i < NSIZES - NBINS; i++) {
+        uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
             &arena->stats.lstats[i].nmalloc);
-        lstats[i].ndalloc += arena_stats_read_u64(tsdn, &arena->stats,
+        lstats[i].nmalloc += nmalloc;
+        astats->nmalloc_large += nmalloc;
+
+        uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
             &arena->stats.lstats[i].ndalloc);
-        lstats[i].nrequests += arena_stats_read_u64(tsdn, &arena->stats,
+        lstats[i].ndalloc += ndalloc;
+        astats->ndalloc_large += ndalloc;
+
+        uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
             &arena->stats.lstats[i].nrequests);
-        lstats[i].curlextents += arena_stats_read_zu(tsdn,
+        lstats[i].nrequests += nrequests;
+        astats->nrequests_large += nrequests;
+
+        size_t curlextents = arena_stats_read_zu(tsdn,
             &arena->stats, &arena->stats.lstats[i].curlextents);
+        lstats[i].curlextents += curlextents;
+        astats->allocated_large += curlextents * index2size(i);
     }
 
     arena_stats_unlock(tsdn, &arena->stats);
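For orientation: the loop added above re-derives each arena-wide total as a sum over the NSIZES - NBINS large size classes, roughly

    nmalloc_large   = sum_i lstats[i].nmalloc
    ndalloc_large   = sum_i lstats[i].ndalloc
    nrequests_large = sum_i lstats[i].nrequests
    allocated_large = sum_i lstats[i].curlextents * index2size(i)

so the struct fields of the same names only hold meaningful values in the snapshot produced by arena_stats_merge(), which is what the /* Derived. */ comments in the header hunk indicate.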
@@ -217,7 +228,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
         astats->tcache_bytes = 0;
         malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
         ql_foreach(tcache, &arena->tcache_ql, link) {
-            for (i = 0; i < nhbins; i++) {
+            for (szind_t i = 0; i < nhbins; i++) {
                 tbin = &tcache->tbins[i];
                 astats->tcache_bytes += tbin->ncached *
                     index2size(i);
@@ -226,7 +237,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
         malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
     }
 
-    for (i = 0; i < NBINS; i++) {
+    for (szind_t i = 0; i < NBINS; i++) {
         arena_bin_t *bin = &arena->bins[i];
 
         malloc_mutex_lock(tsdn, &bin->lock);
@@ -347,10 +358,6 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
     index = size2index(usize);
     hindex = (index >= NBINS) ? index - NBINS : 0;
 
-    arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.nmalloc_large,
-        1);
-    arena_stats_add_zu(tsdn, &arena->stats, &arena->stats.allocated_large,
-        usize);
     arena_stats_add_u64(tsdn, &arena->stats,
         &arena->stats.lstats[hindex].nmalloc, 1);
     arena_stats_add_u64(tsdn, &arena->stats,
@@ -371,10 +378,6 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
     index = size2index(usize);
     hindex = (index >= NBINS) ? index - NBINS : 0;
 
-    arena_stats_add_u64(tsdn, &arena->stats, &arena->stats.ndalloc_large,
-        1);
-    arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.allocated_large,
-        usize);
     arena_stats_add_u64(tsdn, &arena->stats,
         &arena->stats.lstats[hindex].ndalloc, 1);
     arena_stats_sub_zu(tsdn, &arena->stats,
@@ -389,8 +392,6 @@ arena_large_reset_stats_cancel(tsdn_t *tsdn, arena_t *arena, size_t usize) {
     cassert(config_stats);
 
     arena_stats_lock(tsdn, &arena->stats);
-    arena_stats_sub_u64(tsdn, &arena->stats, &arena->stats.ndalloc_large,
-        1);
     arena_stats_sub_u64(tsdn, &arena->stats,
         &arena->stats.lstats[hindex].ndalloc, 1);
     arena_stats_unlock(tsdn, &arena->stats);