Add the "stats.resident" mallctl.

Jason Evans 2015-03-23 17:25:57 -07:00
parent 8ad6bf360f
commit 4acd75a694
8 changed files with 67 additions and 23 deletions

ChangeLog

@@ -63,6 +63,8 @@ found in the git revision history:
 - Add metadata statistics, which are accessible via the "stats.metadata",
   "stats.arenas.<i>.metadata.mapped", and
   "stats.arenas.<i>.metadata.allocated" mallctls.
+- Add the "stats.resident" mallctl, which reports the upper limit of
+  physically resident memory mapped by the allocator.
 - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
   feature on/off during program execution.
 - Add sdallocx(), which implements sized deallocation. The primary

doc/jemalloc.xml.in

@@ -1938,6 +1938,23 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 linkend="stats.arenas.i.metadata.allocated"><mallctl>stats.arenas.&lt;i&gt;.metadata.allocated</mallctl></link>).</para></listitem>
 </varlistentry>
+
+<varlistentry id="stats.resident">
+<term>
+<mallctl>stats.resident</mallctl>
+(<type>size_t</type>)
+<literal>r-</literal>
+[<option>--enable-stats</option>]
+</term>
+<listitem><para>Maximum number of bytes in physically resident data
+pages mapped by the allocator, comprising all pages dedicated to
+allocator metadata, pages backing active allocations, and unused dirty
+pages. This is a maximum rather than precise because pages may not
+actually be physically resident if they correspond to demand-zeroed
+virtual memory that has not yet been touched. This is a multiple of the
+page size, and is larger than <link
+linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
+</varlistentry>

 <varlistentry id="stats.mapped">
 <term>
 <mallctl>stats.mapped</mallctl>
@@ -1945,10 +1962,10 @@
 <literal>r-</literal>
 [<option>--enable-stats</option>]
 </term>
-<listitem><para>Total number of bytes in chunks mapped on behalf of the
-application. This is a multiple of the chunk size, and is at least as
+<listitem><para>Total number of bytes in active chunks mapped by the
+allocator. This is a multiple of the chunk size, and is at least as
 large as <link
-linkend="stats.active"><mallctl>stats.active</mallctl></link>. This
+linkend="stats.resident"><mallctl>stats.resident</mallctl></link>. This
 does not include inactive chunks.</para></listitem>
 </varlistentry>
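For context, a minimal sketch of how an application could read the new statistic alongside its neighbors through the mallctl() interface (assumes a jemalloc built with --enable-stats; error checking omitted; the "epoch" write is the documented way to refresh the stats snapshot before reading):

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	size_t sz = sizeof(size_t);
	size_t active, resident, mapped;

	/* Refresh the internal statistics snapshot. */
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	mallctl("stats.active", &active, &sz, NULL, 0);
	mallctl("stats.resident", &resident, &sz, NULL, 0);
	mallctl("stats.mapped", &mapped, &sz, NULL, 0);

	/* Per the documentation above: active <= resident <= mapped. */
	printf("active: %zu, resident: %zu, mapped: %zu\n",
	    active, resident, mapped);
	return (0);
}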

include/jemalloc/internal/base.h

@@ -10,7 +10,7 @@
 #ifdef JEMALLOC_H_EXTERNS

 void	*base_alloc(size_t size);
-size_t	base_allocated_get(void);
+void	base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
 bool	base_boot(void);
 void	base_prefork(void);
 void	base_postfork_parent(void);

include/jemalloc/internal/ctl.h

@@ -53,6 +53,7 @@ struct ctl_stats_s {
 	size_t		allocated;
 	size_t		active;
 	size_t		metadata;
+	size_t		resident;
 	size_t		mapped;
 	unsigned	narenas;
 	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */

include/jemalloc/internal/private_symbols.txt

@@ -111,11 +111,11 @@ atomic_sub_uint32
 atomic_sub_uint64
 atomic_sub_z
 base_alloc
-base_allocated_get
 base_boot
 base_postfork_child
 base_postfork_parent
 base_prefork
+base_stats_get
 bitmap_full
 bitmap_get
 bitmap_info_init

src/base.c

@@ -8,6 +8,8 @@ static malloc_mutex_t	base_mtx;
 static extent_tree_t	base_avail_szad;
 static extent_node_t	*base_nodes;
 static size_t		base_allocated;
+static size_t		base_resident;
+static size_t		base_mapped;

 /******************************************************************************/
@@ -54,11 +56,15 @@ base_chunk_alloc(size_t minsize)
 		base_node_dalloc(node);
 		return (NULL);
 	}
+	base_mapped += csize;
 	if (node == NULL) {
+		node = (extent_node_t *)addr;
+		addr = (void *)((uintptr_t)addr + nsize);
 		csize -= nsize;
-		node = (extent_node_t *)((uintptr_t)addr + csize);
-		if (config_stats)
+		if (config_stats) {
 			base_allocated += nsize;
+			base_resident += PAGE_CEILING(nsize);
+		}
 	}
 	extent_node_init(node, NULL, addr, csize, true);
 	return (node);
@@ -106,23 +112,30 @@ base_alloc(size_t size)
 		extent_tree_szad_insert(&base_avail_szad, node);
 	} else
 		base_node_dalloc(node);
-	if (config_stats)
+	if (config_stats) {
 		base_allocated += csize;
+		/*
+		 * Add one PAGE to base_resident for every page boundary that is
+		 * crossed by the new allocation.
+		 */
+		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
+		    PAGE_CEILING((uintptr_t)ret);
+	}
 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
 label_return:
 	malloc_mutex_unlock(&base_mtx);
 	return (ret);
 }

-size_t
-base_allocated_get(void)
+void
+base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
 {
-	size_t ret;

 	malloc_mutex_lock(&base_mtx);
-	ret = base_allocated;
+	*allocated = base_allocated;
+	*resident = base_resident;
+	*mapped = base_mapped;
 	malloc_mutex_unlock(&base_mtx);
-	return (ret);
 }

 bool
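The resident accounting added to base_alloc() charges base_resident only for page spans not already counted by earlier allocations carved from the same pages. A small standalone sketch (hypothetical 4 KiB page; local stand-ins for jemalloc's PAGE/PAGE_CEILING macros) illustrates the expression:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins mirroring jemalloc's page macros; 4 KiB assumed. */
#define PAGE		((uintptr_t)4096)
#define PAGE_CEILING(a)	(((a) + PAGE - 1) & ~(PAGE - 1))

/* The same delta base_alloc() adds to base_resident. */
static size_t
resident_delta(uintptr_t ret, size_t csize)
{
	return ((size_t)(PAGE_CEILING(ret + csize) - PAGE_CEILING(ret)));
}

int
main(void)
{
	/* Allocation starting at a page boundary: counts its first page. */
	printf("%zu\n", resident_delta(0x1000, 100));	/* 4096 */
	/* Sub-page allocation within an already-counted page: adds nothing. */
	printf("%zu\n", resident_delta(0x1064, 100));	/* 0 */
	/* Allocation crossing into the next page: adds one more page. */
	printf("%zu\n", resident_delta(0x1f80, 0x100));	/* 4096 */
	return (0);
}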

src/ctl.c

@@ -194,6 +194,7 @@ CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
 CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
 CTL_PROTO(stats_mapped)

 /******************************************************************************/
@@ -469,6 +470,7 @@ static const ctl_named_node_t stats_node[] = {
 	{NAME("allocated"),	CTL(stats_allocated)},
 	{NAME("active"),	CTL(stats_active)},
 	{NAME("metadata"),	CTL(stats_metadata)},
+	{NAME("resident"),	CTL(stats_resident)},
 	{NAME("mapped"),	CTL(stats_mapped)},
 	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
 };
@@ -711,17 +713,23 @@ ctl_refresh(void)
 	}

 	if (config_stats) {
+		size_t base_allocated, base_resident, base_mapped;
+		base_stats_get(&base_allocated, &base_resident, &base_mapped);
 		ctl_stats.allocated =
-		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
-		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
-		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
+		    ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
 		ctl_stats.active =
 		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
-		ctl_stats.metadata = base_allocated_get()
-		    + ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped
-		    + ctl_stats.arenas[ctl_stats.narenas].astats
+		ctl_stats.metadata = base_allocated +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+		    ctl_stats.arenas[ctl_stats.narenas].astats
 		    .metadata_allocated;
+		ctl_stats.resident = base_resident +
+		    ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+		    ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+		    ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+		ctl_stats.mapped = base_mapped +
 		    ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
 	}
@@ -1976,6 +1984,7 @@ CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
 CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
 CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
 CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)

 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
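To make the ctl_refresh() arithmetic above concrete, here is a toy recomputation of stats.resident from its three inputs with made-up numbers (the variables abbreviate the ctl_stats fields summed in the hunk; LG_PAGE = 12 assumes 4 KiB pages):

#include <stdio.h>

#define LG_PAGE	12	/* assumed: 4 KiB pages */

int
main(void)
{
	/* Hypothetical inputs mirroring the terms summed in ctl_refresh(). */
	size_t base_resident = 40960;		/* from base_stats_get() */
	size_t metadata_mapped = 2097152;	/* chunk-mapped arena metadata */
	size_t pactive = 1000;			/* pages backing allocations */
	size_t pdirty = 50;			/* unused dirty pages */

	size_t active = pactive << LG_PAGE;
	size_t resident = base_resident + metadata_mapped +
	    ((pactive + pdirty) << LG_PAGE);

	/* resident exceeds active by the dirty pages plus the metadata. */
	printf("active: %zu, resident: %zu\n", active, resident);
	return (0);
}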

src/stats.c

@@ -573,16 +573,18 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	if (config_stats) {
 		size_t *cactive;
-		size_t allocated, active, metadata, mapped;
+		size_t allocated, active, metadata, resident, mapped;

 		CTL_GET("stats.cactive", &cactive, size_t *);
 		CTL_GET("stats.allocated", &allocated, size_t);
 		CTL_GET("stats.active", &active, size_t);
 		CTL_GET("stats.metadata", &metadata, size_t);
+		CTL_GET("stats.resident", &resident, size_t);
 		CTL_GET("stats.mapped", &mapped, size_t);
 		malloc_cprintf(write_cb, cbopaque,
-		    "Allocated: %zu, active: %zu, metadata: %zu, mapped: %zu\n",
-		    allocated, active, metadata, mapped);
+		    "Allocated: %zu, active: %zu, metadata: %zu, resident: %zu,"
+		    " mapped: %zu\n", allocated, active, metadata, resident,
+		    mapped);
 		malloc_cprintf(write_cb, cbopaque,
 		    "Current active ceiling: %zu\n", atomic_read_z(cactive));