metadata usage breakdowns: tracking edata and rtree usage

This commit is contained in:
Shirui Cheng 2023-10-10 09:46:23 -07:00 committed by Qi Wang
parent 005f20aa7f
commit 36becb1302
10 changed files with 116 additions and 30 deletions

View File

@ -52,6 +52,8 @@ struct arena_stats_s {
* in pa_shard_stats_t.
*/
size_t base; /* Derived. */
size_t metadata_edata; /* Derived. */
size_t metadata_rtree; /* Derived. */
size_t resident; /* Derived. */
size_t metadata_thp; /* Derived. */
size_t mapped; /* Derived. */

View File

@ -78,6 +78,8 @@ struct base_s {
/* Stats, only maintained if config_stats. */
size_t allocated;
size_t edata_allocated;
size_t rtree_allocated;
size_t resident;
size_t mapped;
/* Number of THP regions touched. */
@ -104,10 +106,12 @@ extent_hooks_t *base_extent_hooks_set(base_t *base,
extent_hooks_t *extent_hooks);
void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
void *base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size);
void *b0_alloc_tcache_stack(tsdn_t *tsdn, size_t size);
void b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack);
void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *resident, size_t *mapped, size_t *n_thp);
size_t *edata_allocated, size_t *rtree_allocated, size_t *resident,
size_t *mapped, size_t *n_thp);
void base_prefork(tsdn_t *tsdn, base_t *base);
void base_postfork_parent(tsdn_t *tsdn, base_t *base);
void base_postfork_child(tsdn_t *tsdn, base_t *base);

View File

@ -57,6 +57,8 @@ typedef struct ctl_stats_s {
size_t allocated;
size_t active;
size_t metadata;
size_t metadata_edata;
size_t metadata_rtree;
size_t metadata_thp;
size_t resident;
size_t mapped;

View File

@ -92,8 +92,10 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
muzzy_decay_ms, nactive, ndirty, nmuzzy);
size_t base_allocated, base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
size_t base_allocated, base_edata_allocated, base_rtree_allocated,
base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated,
&base_edata_allocated, &base_rtree_allocated, &base_resident,
&base_mapped, &metadata_thp);
size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
astats->mapped += base_mapped + pac_mapped_sz;
@ -102,6 +104,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
astats->base += base_allocated;
astats->metadata_edata += base_edata_allocated;
astats->metadata_rtree += base_rtree_allocated;
atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
astats->metadata_thp += metadata_thp;

View File

@ -430,6 +430,8 @@ base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
edata_avail_new(&base->edata_avail);
if (config_stats) {
base->edata_allocated = 0;
base->rtree_allocated = 0;
base->allocated = sizeof(base_block_t);
base->resident = PAGE_CEILING(sizeof(base_block_t));
base->mapped = block->size;
@ -482,7 +484,7 @@ base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
static void *
base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t *esn) {
size_t *esn, size_t *ret_usize) {
alignment = QUANTUM_CEILING(alignment);
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
@ -510,6 +512,9 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
if (esn != NULL) {
*esn = (size_t)edata_sn_get(edata);
}
if (ret_usize != NULL) {
*ret_usize = usize;
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
return ret;
@ -525,21 +530,38 @@ label_return:
*/
void *
base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
return base_alloc_impl(tsdn, base, size, alignment, NULL, NULL);
}
edata_t *
base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
size_t esn, usize;
edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
EDATA_ALIGNMENT, &esn);
EDATA_ALIGNMENT, &esn, &usize);
if (edata == NULL) {
return NULL;
}
if (config_stats) {
base->edata_allocated += usize;
}
edata_esn_set(edata, esn);
return edata;
}
/*
 * Allocate rtree metadata (node/leaf arrays) from the base allocator,
 * attributing the usable size to base->rtree_allocated so that rtree
 * metadata usage can be reported separately (surfaced via the
 * stats.metadata_rtree / stats.arenas.<i>.metadata_rtree mallctls).
 * CACHELINE alignment matches what rtree_{node,leaf}_alloc previously
 * requested through plain base_alloc().  Returns NULL on failure.
 */
void *
base_alloc_rtree(tsdn_t *tsdn, base_t *base, size_t size) {
size_t usize;
/* base_alloc_impl reports the rounded-up usable size through &usize. */
void *rtree = base_alloc_impl(tsdn, base, size, CACHELINE, NULL,
&usize);
if (rtree == NULL) {
return NULL;
}
if (config_stats) {
/*
 * NOTE(review): base_alloc_impl drops base->mtx before returning,
 * so this counter update is outside the mutex — confirm the stats
 * synchronization requirements match base->edata_allocated's.
 */
base->rtree_allocated += usize;
}
return rtree;
}
static inline void
b0_alloc_header_size(size_t *header_size, size_t *alignment) {
*alignment = QUANTUM;
@ -573,7 +595,8 @@ b0_alloc_tcache_stack(tsdn_t *tsdn, size_t stack_size) {
b0_alloc_header_size(&header_size, &alignment);
size_t alloc_size = sz_s2u(stack_size + header_size);
void *addr = base_alloc_impl(tsdn, base, alloc_size, alignment, &esn);
void *addr = base_alloc_impl(tsdn, base, alloc_size, alignment, &esn,
NULL);
if (addr == NULL) {
edata_avail_insert(&base->edata_avail, edata);
return NULL;
@ -609,14 +632,18 @@ b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack) {
}
void
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
size_t *edata_allocated, size_t *rtree_allocated, size_t *resident,
size_t *mapped, size_t *n_thp) {
cassert(config_stats);
malloc_mutex_lock(tsdn, &base->mtx);
assert(base->allocated <= base->resident);
assert(base->resident <= base->mapped);
assert(base->edata_allocated + base->rtree_allocated <= base->allocated);
*allocated = base->allocated;
*edata_allocated = base->edata_allocated;
*rtree_allocated = base->rtree_allocated;
*resident = base->resident;
*mapped = base->mapped;
*n_thp = base->n_thp;

View File

@ -294,6 +294,8 @@ CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_edata)
CTL_PROTO(stats_arenas_i_metadata_rtree)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
@ -307,6 +309,8 @@ CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_edata)
CTL_PROTO(stats_metadata_rtree)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
@ -801,6 +805,8 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
{NAME("base"), CTL(stats_arenas_i_base)},
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_edata"), CTL(stats_arenas_i_metadata_edata)},
{NAME("metadata_rtree"), CTL(stats_arenas_i_metadata_rtree)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
{NAME("tcache_stashed_bytes"),
@ -846,6 +852,8 @@ static const ctl_named_node_t stats_node[] = {
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("metadata"), CTL(stats_metadata)},
{NAME("metadata_edata"), CTL(stats_metadata_edata)},
{NAME("metadata_rtree"), CTL(stats_metadata_rtree)},
{NAME("metadata_thp"), CTL(stats_metadata_thp)},
{NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)},
@ -1138,6 +1146,10 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
sdstats->astats.base += astats->astats.base;
sdstats->astats.metadata_edata += astats->astats
.metadata_edata;
sdstats->astats.metadata_rtree += astats->astats
.metadata_rtree;
sdstats->astats.resident += astats->astats.resident;
sdstats->astats.metadata_thp += astats->astats.metadata_thp;
ctl_accum_atomic_zu(&sdstats->astats.internal,
@ -1341,6 +1353,10 @@ ctl_refresh(tsdn_t *tsdn) {
ctl_stats->metadata = ctl_sarena->astats->astats.base +
atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED);
ctl_stats->metadata_edata = ctl_sarena->astats->astats
.metadata_edata;
ctl_stats->metadata_rtree = ctl_sarena->astats->astats
.metadata_rtree;
ctl_stats->resident = ctl_sarena->astats->astats.resident;
ctl_stats->metadata_thp =
ctl_sarena->astats->astats.metadata_thp;
@ -3599,6 +3615,10 @@ label_return:
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
CTL_RO_CGEN(config_stats, stats_metadata_edata, ctl_stats->metadata_edata,
size_t)
CTL_RO_CGEN(config_stats, stats_metadata_rtree, ctl_stats->metadata_rtree,
size_t)
CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
@ -3664,6 +3684,10 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_base,
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_edata,
arenas_i(mib[2])->astats->astats.metadata_edata, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_rtree,
arenas_i(mib[2])->astats->astats.metadata_rtree, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,

View File

@ -29,14 +29,14 @@ rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
static rtree_node_elm_t *
rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
nelms * sizeof(rtree_node_elm_t), CACHELINE);
return (rtree_node_elm_t *)base_alloc_rtree(tsdn, rtree->base,
nelms * sizeof(rtree_node_elm_t));
}
static rtree_leaf_elm_t *
rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
return (rtree_leaf_elm_t *)base_alloc_rtree(tsdn, rtree->base,
nelms * sizeof(rtree_leaf_elm_t));
}
static rtree_node_elm_t *

View File

@ -1052,7 +1052,8 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
const char *dss;
ssize_t dirty_decay_ms, muzzy_decay_ms;
size_t page, pactive, pdirty, pmuzzy, mapped, retained;
size_t base, internal, resident, metadata_thp, extent_avail;
size_t base, internal, resident, metadata_edata, metadata_rtree,
metadata_thp, extent_avail;
uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
size_t small_allocated;
@ -1352,6 +1353,8 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
GET_AND_EMIT_MEM_STAT(retained)
GET_AND_EMIT_MEM_STAT(base)
GET_AND_EMIT_MEM_STAT(internal)
GET_AND_EMIT_MEM_STAT(metadata_edata)
GET_AND_EMIT_MEM_STAT(metadata_rtree)
GET_AND_EMIT_MEM_STAT(metadata_thp)
GET_AND_EMIT_MEM_STAT(tcache_bytes)
GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
@ -1696,8 +1699,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
* These should be deleted. We keep them around for a while, to aid in
* the transition to the emitter code.
*/
size_t allocated, active, metadata, metadata_thp, resident, mapped,
retained;
size_t allocated, active, metadata, metadata_edata, metadata_rtree,
metadata_thp, resident, mapped, retained;
size_t num_background_threads;
size_t zero_reallocs;
uint64_t background_thread_num_runs, background_thread_run_interval;
@ -1705,6 +1708,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.metadata_edata", &metadata_edata, size_t);
CTL_GET("stats.metadata_rtree", &metadata_rtree, size_t);
CTL_GET("stats.metadata_thp", &metadata_thp, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
@ -1730,6 +1735,10 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_json_kv(emitter, "allocated", emitter_type_size, &allocated);
emitter_json_kv(emitter, "active", emitter_type_size, &active);
emitter_json_kv(emitter, "metadata", emitter_type_size, &metadata);
emitter_json_kv(emitter, "metadata_edata", emitter_type_size,
&metadata_edata);
emitter_json_kv(emitter, "metadata_rtree", emitter_type_size,
&metadata_rtree);
emitter_json_kv(emitter, "metadata_thp", emitter_type_size,
&metadata_thp);
emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
@ -1739,9 +1748,10 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
&zero_reallocs);
emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
"metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
"retained: %zu\n", allocated, active, metadata, metadata_thp,
resident, mapped, retained);
"metadata: %zu (n_thp %zu, edata %zu, rtree %zu), resident: %zu, "
"mapped: %zu, retained: %zu\n", allocated, active, metadata,
metadata_thp, metadata_edata, metadata_rtree, resident, mapped,
retained);
/* Strange behaviors */
emitter_table_printf(emitter,

View File

@ -28,7 +28,8 @@ static extent_hooks_t hooks_not_null = {
TEST_BEGIN(test_base_hooks_default) {
base_t *base;
size_t allocated0, allocated1, resident, mapped, n_thp;
size_t allocated0, allocated1, edata_allocated,
rtree_allocated, resident, mapped, n_thp;
tsdn_t *tsdn = tsd_tsdn(tsd_fetch());
base = base_new(tsdn, 0,
@ -36,8 +37,8 @@ TEST_BEGIN(test_base_hooks_default) {
/* metadata_use_hooks */ true);
if (config_stats) {
base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
&n_thp);
base_stats_get(tsdn, base, &allocated0, &edata_allocated,
&rtree_allocated, &resident, &mapped, &n_thp);
expect_zu_ge(allocated0, sizeof(base_t),
"Base header should count as allocated");
if (opt_metadata_thp == metadata_thp_always) {
@ -50,8 +51,8 @@ TEST_BEGIN(test_base_hooks_default) {
"Unexpected base_alloc() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
&n_thp);
base_stats_get(tsdn, base, &allocated1, &edata_allocated,
&rtree_allocated, &resident, &mapped, &n_thp);
expect_zu_ge(allocated1 - allocated0, 42,
"At least 42 bytes were allocated by base_alloc()");
}
@ -63,7 +64,8 @@ TEST_END
TEST_BEGIN(test_base_hooks_null) {
extent_hooks_t hooks_orig;
base_t *base;
size_t allocated0, allocated1, resident, mapped, n_thp;
size_t allocated0, allocated1, edata_allocated,
rtree_allocated, resident, mapped, n_thp;
extent_hooks_prep();
try_dalloc = false;
@ -79,8 +81,8 @@ TEST_BEGIN(test_base_hooks_null) {
expect_ptr_not_null(base, "Unexpected base_new() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated0, &resident, &mapped,
&n_thp);
base_stats_get(tsdn, base, &allocated0, &edata_allocated,
&rtree_allocated, &resident, &mapped, &n_thp);
expect_zu_ge(allocated0, sizeof(base_t),
"Base header should count as allocated");
if (opt_metadata_thp == metadata_thp_always) {
@ -93,8 +95,8 @@ TEST_BEGIN(test_base_hooks_null) {
"Unexpected base_alloc() failure");
if (config_stats) {
base_stats_get(tsdn, base, &allocated1, &resident, &mapped,
&n_thp);
base_stats_get(tsdn, base, &allocated1, &edata_allocated,
&rtree_allocated, &resident, &mapped, &n_thp);
expect_zu_ge(allocated1 - allocated0, 42,
"At least 42 bytes were allocated by base_alloc()");
}

View File

@ -4,7 +4,8 @@
#define STRINGIFY(x) STRINGIFY_HELPER(x)
TEST_BEGIN(test_stats_summary) {
size_t sz, allocated, active, resident, mapped;
size_t sz, allocated, active, resident, mapped,
metadata, metadata_edata, metadata_rtree;
int expected = config_stats ? 0 : ENOENT;
sz = sizeof(size_t);
@ -17,6 +18,13 @@ TEST_BEGIN(test_stats_summary) {
expect_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
expect_d_eq(mallctl("stats.metadata", (void *)&metadata, &sz, NULL, 0),
expected, "Unexpected mallctl() result");
expect_d_eq(mallctl("stats.metadata_edata", (void *)&metadata_edata,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
expect_d_eq(mallctl("stats.metadata_rtree", (void *)&metadata_rtree,
&sz, NULL, 0), expected, "Unexpected mallctl() result");
if (config_stats) {
expect_zu_le(allocated, active,
"allocated should be no larger than active");
@ -24,6 +32,9 @@ TEST_BEGIN(test_stats_summary) {
"active should be less than resident");
expect_zu_lt(active, mapped,
"active should be less than mapped");
expect_zu_le(metadata_edata + metadata_rtree, metadata,
"the sum of metadata_edata and metadata_rtree "
"should be no larger than metadata");
}
}
TEST_END