Add the "stats.cactive" mallctl.

Add the "stats.cactive" mallctl, which can be used to efficiently and
repeatedly query approximately how much active memory the application is
utilizing.
Jason Evans 2011-03-18 17:56:14 -07:00
parent 597632be18
commit 0657f12acd
16 changed files with 126 additions and 20 deletions
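A minimal usage sketch of the new mallctl (not part of the change set): assuming a build configured with --enable-stats and the default unprefixed public API, an application fetches the size_t pointer once and then polls it with an atomic read, without issuing further mallctl() calls.

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t *cactive;
	size_t sz = sizeof(cactive);
	int i;

	/* Fetch the pointer once; subsequent reads need no mallctl() call. */
	if (mallctl("stats.cactive", &cactive, &sz, NULL, 0) != 0) {
		fprintf(stderr, "mallctl(\"stats.cactive\") failed\n");
		return (1);
	}

	for (i = 0; i < 3; i++) {
		void *p = malloc((size_t)1 << 20);

		/*
		 * The counter is maintained with atomic operations, so an
		 * atomic read is needed when dereferencing the pointer;
		 * __sync_fetch_and_add() with 0 stands in for an atomic load.
		 */
		printf("approximate active bytes: %zu\n",
		    __sync_fetch_and_add(cactive, (size_t)0));
		free(p);
	}
	return (0);
}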

Makefile.in

@@ -45,13 +45,13 @@ endif
BINS := @srcroot@bin/pprof
CHDRS := @objroot@include/jemalloc/jemalloc@install_suffix@.h \
@objroot@include/jemalloc/jemalloc_defs@install_suffix@.h
CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/base.c \
@srcroot@src/bitmap.c @srcroot@src/chunk.c @srcroot@src/chunk_dss.c \
@srcroot@src/chunk_mmap.c @srcroot@src/chunk_swap.c @srcroot@src/ckh.c \
@srcroot@src/ctl.c @srcroot@src/extent.c @srcroot@src/hash.c \
@srcroot@src/huge.c @srcroot@src/mb.c @srcroot@src/mutex.c \
@srcroot@src/prof.c @srcroot@src/rtree.c \
@srcroot@src/stats.c @srcroot@src/tcache.c
CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/atomic.c \
@srcroot@src/base.c @srcroot@src/bitmap.c @srcroot@src/chunk.c \
@srcroot@src/chunk_dss.c @srcroot@src/chunk_mmap.c \
@srcroot@src/chunk_swap.c @srcroot@src/ckh.c @srcroot@src/ctl.c \
@srcroot@src/extent.c @srcroot@src/hash.c @srcroot@src/huge.c \
@srcroot@src/mb.c @srcroot@src/mutex.c @srcroot@src/prof.c \
@srcroot@src/rtree.c @srcroot@src/stats.c @srcroot@src/tcache.c
ifeq (macho, @abi@)
CSRCS += @srcroot@src/zone.c
endif
@@ -96,6 +96,7 @@ doc: $(DOCS)
#
-include $(CSRCS:@srcroot@%.c=@objroot@%.d)
-include $(CSRCS:@srcroot@%.c=@objroot@%.pic.d)
-include $(CTESTS:@srcroot@%.c=@objroot@%.d)
@objroot@src/%.o: @srcroot@src/%.c
@mkdir -p $(@D)

doc/jemalloc.xml.in

@@ -1535,6 +1535,25 @@ malloc_conf = "xmalloc:true";]]></programlisting>
option for additional information.</para></listitem>
</varlistentry>
<varlistentry id="stats.cactive">
<term>
<mallctl>stats.cactive</mallctl>
(<type>size_t *</type>)
<literal>r-</literal>
[<option>--enable-stats</option>]
</term>
<listitem><para>Pointer to a counter that contains an approximate count
of the current number of bytes in active pages. The estimate may be
high, but never low, because each arena rounds up to the nearest
multiple of the chunk size when computing its contribution to the
counter. Note that the <link
linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
on this counter. Furthermore, counter consistency is maintained via
atomic operations, so it is necessary to use an atomic operation in
order to guarantee a consistent read when dereferencing the pointer.
</para></listitem>
</varlistentry>
<varlistentry id="stats.allocated">
<term>
<mallctl>stats.allocated</mallctl>

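As a concrete illustration of the rounding described in the stats.cactive entry above (illustrative only, assuming a 4 MiB chunk size): an arena holding 5 MiB of active pages contributes the chunk ceiling of that value, 8 MiB, which is why the counter can only err high, never low.

#include <stdio.h>

/* Assumed 4 MiB chunk size; the real value is configuration-dependent. */
#define CHUNKSIZE		((size_t)4 << 20)
#define CHUNK_CEILING(s)	(((s) + (CHUNKSIZE - 1)) & ~(CHUNKSIZE - 1))

int
main(void)
{
	size_t active = (size_t)5 << 20;	/* 5 MiB of active pages */

	/* Prints 8388608 (8 MiB): this arena's contribution to stats.cactive. */
	printf("%zu\n", CHUNK_CEILING(active));
	return (0);
}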
include/jemalloc/internal/hash.h

@@ -17,7 +17,7 @@
uint64_t hash(const void *key, size_t len, uint64_t seed);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(HASH_C_))
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
/*
* The following hash function is based on MurmurHash64A(), placed into the
* public domain by Austin Appleby. See http://murmurhash.googlepages.com/ for

include/jemalloc/internal/jemalloc_internal.h.in

@@ -213,6 +213,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
@@ -237,6 +238,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
/******************************************************************************/
#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
@@ -352,6 +354,7 @@ int buferror(int errnum, char *buf, size_t buflen);
void jemalloc_prefork(void);
void jemalloc_postfork(void);
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"
@@ -376,6 +379,7 @@ void jemalloc_postfork(void);
/******************************************************************************/
#define JEMALLOC_H_INLINES
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prn.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/stats.h"

include/jemalloc/internal/mb.h

@@ -17,7 +17,7 @@
void mb_write(void);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MB_C_))
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
* According to the Intel Architecture Software Developer's Manual, current

include/jemalloc/internal/rtree.h

@@ -49,7 +49,7 @@ void *rtree_get(rtree_t *rtree, uintptr_t key);
bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(RTREE_C_))
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE void * \

include/jemalloc/internal/stats.h

@@ -154,6 +154,10 @@ struct chunk_stats_s {
extern bool opt_stats_print;
#ifdef JEMALLOC_STATS
extern size_t stats_cactive;
#endif
char *u2s(uint64_t x, unsigned base, char *s);
#ifdef JEMALLOC_STATS
void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
@@ -166,9 +170,38 @@ void stats_print(void (*write)(void *, const char *), void *cbopaque,
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_STATS
#ifdef JEMALLOC_H_INLINES
#ifdef JEMALLOC_STATS
#ifndef JEMALLOC_ENABLE_INLINE
size_t stats_cactive_get(void);
void stats_cactive_add(size_t size);
void stats_cactive_sub(size_t size);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{
return (atomic_read_z(&stats_cactive));
}
JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
atomic_add_z(&stats_cactive, size);
}
JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
atomic_sub_z(&stats_cactive, size);
}
#endif
#endif /* JEMALLOC_H_INLINES */
#endif /* JEMALLOC_STATS */
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

src/arena.c

@@ -315,6 +315,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
size_t cactive_diff;
#endif
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
old_ndirty = chunk->ndirty;
@@ -333,6 +336,13 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
rem_pages = total_pages - need_pages;
arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
#ifdef JEMALLOC_STATS
/* Update stats_cactive if nactive is crossing a chunk multiple. */
cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
#endif
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
@@ -720,6 +730,9 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
assert(pageind + npages <= chunk_npages);
if (mapelm->bits & CHUNK_MAP_DIRTY) {
size_t i;
#ifdef JEMALLOC_STATS
size_t cactive_diff;
#endif
arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm);
@@ -742,6 +755,17 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
CHUNK_MAP_ALLOCATED;
}
#ifdef JEMALLOC_STATS
/*
* Update stats_cactive if nactive is crossing a
* chunk multiple.
*/
cactive_diff = CHUNK_CEILING((arena->nactive +
npages) << PAGE_SHIFT) -
CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
if (cactive_diff != 0)
stats_cactive_add(cactive_diff);
#endif
arena->nactive += npages;
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
@@ -930,6 +954,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
arena_avail_tree_t *runs_avail;
#ifdef JEMALLOC_STATS
size_t cactive_diff;
#endif
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
@@ -951,6 +978,13 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
size = bin_info->run_size;
}
run_pages = (size >> PAGE_SHIFT);
#ifdef JEMALLOC_STATS
/* Update stats_cactive if nactive is crossing a chunk multiple. */
cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
if (cactive_diff != 0)
stats_cactive_sub(cactive_diff);
#endif
arena->nactive -= run_pages;
/*

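The arena.c hunks above all apply the same rule: stats_cactive only changes when an arena's active page count crosses a chunk multiple, so each update is the difference of two chunk ceilings. A standalone sketch of that rule, with assumed 4 KiB pages and 4 MiB chunks (names and constants are illustrative):

#include <stdio.h>

#define PAGE_SHIFT		12			/* assumed 4 KiB pages */
#define CHUNKSIZE		((size_t)4 << 20)	/* assumed 4 MiB chunks */
#define CHUNK_CEILING(s)	(((s) + (CHUNKSIZE - 1)) & ~(CHUNKSIZE - 1))

/* Bytes to add to stats_cactive when nactive grows by need_pages. */
static size_t
cactive_add_diff(size_t nactive, size_t need_pages)
{

	return (CHUNK_CEILING((nactive + need_pages) << PAGE_SHIFT) -
	    CHUNK_CEILING(nactive << PAGE_SHIFT));
}

int
main(void)
{

	/* 1000 -> 1100 pages crosses the 1024-page (4 MiB) boundary: adds 4 MiB. */
	printf("%zu\n", cactive_add_diff(1000, 100));
	/* 100 -> 200 pages stays within the first chunk: adds nothing. */
	printf("%zu\n", cactive_add_diff(100, 100));
	return (0);
}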
src/ckh.c

@@ -34,7 +34,7 @@
* respectively.
*
******************************************************************************/
#define CKH_C_
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

src/ctl.c

@@ -193,6 +193,7 @@ CTL_PROTO(stats_arenas_i_purged)
#endif
INDEX_PROTO(stats_arenas_i)
#ifdef JEMALLOC_STATS
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
@@ -460,6 +461,7 @@ static const ctl_node_t stats_arenas_node[] = {
static const ctl_node_t stats_node[] = {
#ifdef JEMALLOC_STATS
{NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("mapped"), CTL(stats_mapped)},
@@ -1580,6 +1582,7 @@ RETURN:
}
#ifdef JEMALLOC_STATS
CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *)
CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)

src/hash.c

@@ -1,2 +1,2 @@
#define HASH_C_
#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_internal.h"

src/huge.c

@@ -50,6 +50,7 @@ huge_malloc(size_t size, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(csize);
huge_nmalloc++;
huge_allocated += csize;
#endif
@@ -134,6 +135,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_add(chunk_size);
huge_nmalloc++;
huge_allocated += chunk_size;
#endif
@@ -278,6 +280,7 @@ huge_dalloc(void *ptr, bool unmap)
extent_tree_ad_remove(&huge, node);
#ifdef JEMALLOC_STATS
stats_cactive_sub(node->size);
huge_ndalloc++;
huge_allocated -= node->size;
#endif

src/jemalloc.c

@@ -151,7 +151,7 @@ choose_arena_hard(void)
choose = 0;
first_null = narenas;
malloc_mutex_lock(&arenas_lock);
assert(arenas[i] != NULL);
assert(arenas[0] != NULL);
for (i = 1; i < narenas; i++) {
if (arenas[i] != NULL) {
/*

src/mb.c

@@ -1,2 +1,2 @@
#define MB_C_
#define JEMALLOC_MB_C_
#include "jemalloc/internal/jemalloc_internal.h"

src/rtree.c

@@ -1,4 +1,4 @@
#define RTREE_C_
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *

src/stats.c

@@ -39,6 +39,10 @@
bool opt_stats_print = false;
#ifdef JEMALLOC_STATS
size_t stats_cactive = 0;
#endif
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -673,21 +677,26 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
#ifdef JEMALLOC_STATS
{
int err;
size_t ssz;
size_t sszp, ssz;
size_t *cactive;
size_t allocated, active, mapped;
size_t chunks_current, chunks_high, swap_avail;
uint64_t chunks_total;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc;
sszp = sizeof(size_t *);
ssz = sizeof(size_t);
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, mapped: %zu\n", allocated,
active, mapped);
"Allocated: %zu, active: %zu, mapped: %zu\n",
allocated, active, mapped);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_GET("stats.chunks.total", &chunks_total, uint64_t);