From 4acd75a694173186e9e0399d2855f05ce8553008 Mon Sep 17 00:00:00 2001 From: Jason Evans Date: Mon, 23 Mar 2015 17:25:57 -0700 Subject: [PATCH] Add the "stats.resident" mallctl. --- ChangeLog | 2 ++ doc/jemalloc.xml.in | 23 +++++++++++++-- include/jemalloc/internal/base.h | 2 +- include/jemalloc/internal/ctl.h | 1 + include/jemalloc/internal/private_symbols.txt | 2 +- src/base.c | 29 ++++++++++++++----- src/ctl.c | 23 ++++++++++----- src/stats.c | 8 +++-- 8 files changed, 67 insertions(+), 23 deletions(-) diff --git a/ChangeLog b/ChangeLog index a462d025..26075766 100644 --- a/ChangeLog +++ b/ChangeLog @@ -63,6 +63,8 @@ found in the git revision history: - Add metadata statistics, which are accessible via the "stats.metadata", "stats.arenas.<i>.metadata.mapped", and "stats.arenas.<i>.metadata.allocated" mallctls. + - Add the "stats.resident" mallctl, which reports the upper limit of + physically resident memory mapped by the allocator. - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump feature on/off during program execution. - Add sdallocx(), which implements sized deallocation. The primary diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in index 01ac38c3..adff6a4d 100644 --- a/doc/jemalloc.xml.in +++ b/doc/jemalloc.xml.in @@ -1938,6 +1938,23 @@ malloc_conf = "xmalloc:true";]]> linkend="stats.arenas.i.metadata.allocated">stats.arenas.<i>.metadata.allocated). + + + stats.resident + (size_t) + r- + [] + + Maximum number of bytes in physically resident data + pages mapped by the allocator, comprising all pages dedicated to + allocator metadata, pages backing active allocations, and unused dirty + pages. This is a maximum rather than precise because pages may not + actually be physically resident if they correspond to demand-zeroed + virtual memory that has not yet been touched. This is a multiple of the + page size, and is larger than stats.active. 
+ + stats.mapped @@ -1945,10 +1962,10 @@ malloc_conf = "xmalloc:true";]]> r- [] - Total number of bytes in chunks mapped on behalf of the - application. This is a multiple of the chunk size, and is at least as + Total number of bytes in active chunks mapped by the + allocator. This is a multiple of the chunk size, and is at least as large as stats.active. This + linkend="stats.resident">stats.resident. This does not include inactive chunks. diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h index bec76b32..39e46ee4 100644 --- a/include/jemalloc/internal/base.h +++ b/include/jemalloc/internal/base.h @@ -10,7 +10,7 @@ #ifdef JEMALLOC_H_EXTERNS void *base_alloc(size_t size); -size_t base_allocated_get(void); +void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped); bool base_boot(void); void base_prefork(void); void base_postfork_parent(void); diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h index ab9c9862..7c2a4bea 100644 --- a/include/jemalloc/internal/ctl.h +++ b/include/jemalloc/internal/ctl.h @@ -53,6 +53,7 @@ struct ctl_stats_s { size_t allocated; size_t active; size_t metadata; + size_t resident; size_t mapped; unsigned narenas; ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
*/ diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt index bc0f2a6a..aaf69786 100644 --- a/include/jemalloc/internal/private_symbols.txt +++ b/include/jemalloc/internal/private_symbols.txt @@ -111,11 +111,11 @@ atomic_sub_uint32 atomic_sub_uint64 atomic_sub_z base_alloc -base_allocated_get base_boot base_postfork_child base_postfork_parent base_prefork +base_stats_get bitmap_full bitmap_get bitmap_info_init diff --git a/src/base.c b/src/base.c index 01c62df4..1a9b829a 100644 --- a/src/base.c +++ b/src/base.c @@ -8,6 +8,8 @@ static malloc_mutex_t base_mtx; static extent_tree_t base_avail_szad; static extent_node_t *base_nodes; static size_t base_allocated; +static size_t base_resident; +static size_t base_mapped; /******************************************************************************/ @@ -54,11 +56,15 @@ base_chunk_alloc(size_t minsize) base_node_dalloc(node); return (NULL); } + base_mapped += csize; if (node == NULL) { + node = (extent_node_t *)addr; + addr = (void *)((uintptr_t)addr + nsize); csize -= nsize; - node = (extent_node_t *)((uintptr_t)addr + csize); - if (config_stats) + if (config_stats) { base_allocated += nsize; + base_resident += PAGE_CEILING(nsize); + } } extent_node_init(node, NULL, addr, csize, true); return (node); @@ -106,23 +112,30 @@ base_alloc(size_t size) extent_tree_szad_insert(&base_avail_szad, node); } else base_node_dalloc(node); - if (config_stats) + if (config_stats) { base_allocated += csize; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. 
+ */ + base_resident += PAGE_CEILING((uintptr_t)ret + csize) - + PAGE_CEILING((uintptr_t)ret); + } JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize); label_return: malloc_mutex_unlock(&base_mtx); return (ret); } -size_t -base_allocated_get(void) +void +base_stats_get(size_t *allocated, size_t *resident, size_t *mapped) { - size_t ret; malloc_mutex_lock(&base_mtx); - ret = base_allocated; + *allocated = base_allocated; + *resident = base_resident; + *mapped = base_mapped; malloc_mutex_unlock(&base_mtx); - return (ret); } bool diff --git a/src/ctl.c b/src/ctl.c index 447b8776..0ed8ddd4 100644 --- a/src/ctl.c +++ b/src/ctl.c @@ -194,6 +194,7 @@ CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) CTL_PROTO(stats_metadata) +CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) /******************************************************************************/ @@ -469,6 +470,7 @@ static const ctl_named_node_t stats_node[] = { {NAME("allocated"), CTL(stats_allocated)}, {NAME("active"), CTL(stats_active)}, {NAME("metadata"), CTL(stats_metadata)}, + {NAME("resident"), CTL(stats_resident)}, {NAME("mapped"), CTL(stats_mapped)}, {NAME("arenas"), CHILD(indexed, stats_arenas)} }; @@ -711,17 +713,23 @@ ctl_refresh(void) } if (config_stats) { + size_t base_allocated, base_resident, base_mapped; + base_stats_get(&base_allocated, &base_resident, &base_mapped); ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small - + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large - + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; + ctl_stats.arenas[ctl_stats.narenas].allocated_small + + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large + + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge; ctl_stats.active = (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE); - ctl_stats.metadata = base_allocated_get() - + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped - + ctl_stats.arenas[ctl_stats.narenas].astats + 
ctl_stats.metadata = base_allocated + + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + + ctl_stats.arenas[ctl_stats.narenas].astats .metadata_allocated; - ctl_stats.mapped = + ctl_stats.resident = base_resident + + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped + + ((ctl_stats.arenas[ctl_stats.narenas].pactive + + ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE); + ctl_stats.mapped = base_mapped + ctl_stats.arenas[ctl_stats.narenas].astats.mapped; } @@ -1976,6 +1984,7 @@ CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t) CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) diff --git a/src/stats.c b/src/stats.c index b41b458b..c5cea5e6 100644 --- a/src/stats.c +++ b/src/stats.c @@ -573,16 +573,18 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque, if (config_stats) { size_t *cactive; - size_t allocated, active, metadata, mapped; + size_t allocated, active, metadata, resident, mapped; CTL_GET("stats.cactive", &cactive, size_t *); CTL_GET("stats.allocated", &allocated, size_t); CTL_GET("stats.active", &active, size_t); CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); CTL_GET("stats.mapped", &mapped, size_t); malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, metadata: %zu, mapped: %zu\n", - allocated, active, metadata, mapped); + "Allocated: %zu, active: %zu, metadata: %zu, resident: %zu," + " mapped: %zu\n", allocated, active, metadata, resident, + mapped); malloc_cprintf(write_cb, cbopaque, "Current active ceiling: %zu\n", atomic_read_z(cactive));