Add the "stats.cactive" mallctl.
Add the "stats.cactive" mallctl, which can be used to efficiently and repeatedly query approximately how much active memory the application is utilizing.
This commit is contained in:
parent
597632be18
commit
0657f12acd
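As a usage sketch (not part of this commit, and assuming a default
unprefixed jemalloc build), an application could look up the counter's
address once via mallctl() and then poll it cheaply:

#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t *cactive;
	size_t sz = sizeof(cactive);
	int i;

	/* One-time lookup; mallctl writes the counter's address to cactive. */
	if (mallctl("stats.cactive", &cactive, &sz, NULL, 0) != 0)
		return (1);

	/* Repeated queries are then just a load through the pointer. */
	for (i = 0; i < 3; i++) {
		void *p = malloc(1 << 20);
		/*
		 * The documentation added below calls for an atomic read to
		 * guarantee a consistent value; a plain load is shown here
		 * for brevity.
		 */
		printf("cactive ~= %zu bytes\n", *cactive);
		free(p);
	}
	return (0);
}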
diff --git a/Makefile.in b/Makefile.in
@@ -45,13 +45,13 @@ endif
 BINS := @srcroot@bin/pprof
 CHDRS := @objroot@include/jemalloc/jemalloc@install_suffix@.h \
 	@objroot@include/jemalloc/jemalloc_defs@install_suffix@.h
-CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/base.c \
-	@srcroot@src/bitmap.c @srcroot@src/chunk.c @srcroot@src/chunk_dss.c \
-	@srcroot@src/chunk_mmap.c @srcroot@src/chunk_swap.c @srcroot@src/ckh.c \
-	@srcroot@src/ctl.c @srcroot@src/extent.c @srcroot@src/hash.c \
-	@srcroot@src/huge.c @srcroot@src/mb.c @srcroot@src/mutex.c \
-	@srcroot@src/prof.c @srcroot@src/rtree.c \
-	@srcroot@src/stats.c @srcroot@src/tcache.c
+CSRCS := @srcroot@src/jemalloc.c @srcroot@src/arena.c @srcroot@src/atomic.c \
+	@srcroot@src/base.c @srcroot@src/bitmap.c @srcroot@src/chunk.c \
+	@srcroot@src/chunk_dss.c @srcroot@src/chunk_mmap.c \
+	@srcroot@src/chunk_swap.c @srcroot@src/ckh.c @srcroot@src/ctl.c \
+	@srcroot@src/extent.c @srcroot@src/hash.c @srcroot@src/huge.c \
+	@srcroot@src/mb.c @srcroot@src/mutex.c @srcroot@src/prof.c \
+	@srcroot@src/rtree.c @srcroot@src/stats.c @srcroot@src/tcache.c
 ifeq (macho, @abi@)
 CSRCS += @srcroot@src/zone.c
 endif
@@ -96,6 +96,7 @@ doc: $(DOCS)
 #
 -include $(CSRCS:@srcroot@%.c=@objroot@%.d)
 -include $(CSRCS:@srcroot@%.c=@objroot@%.pic.d)
+-include $(CTESTS:@srcroot@%.c=@objroot@%.d)
 
 @objroot@src/%.o: @srcroot@src/%.c
 	@mkdir -p $(@D)
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
@@ -1535,6 +1535,25 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         option for additional information.</para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.cactive">
+        <term>
+          <mallctl>stats.cactive</mallctl>
+          (<type>size_t *</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Pointer to a counter that contains an approximate count
+        of the current number of bytes in active pages.  The estimate may be
+        high, but never low, because each arena rounds up to the nearest
+        multiple of the chunk size when computing its contribution to the
+        counter.  Note that the <link
+        linkend="epoch"><mallctl>epoch</mallctl></link> mallctl has no bearing
+        on this counter.  Furthermore, counter consistency is maintained via
+        atomic operations, so it is necessary to use an atomic operation in
+        order to guarantee a consistent read when dereferencing the pointer.
+        </para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.allocated">
         <term>
           <mallctl>stats.allocated</mallctl>
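The "high, but never low" claim above follows from each arena rounding its
contribution up to a chunk multiple. A small illustration (the chunk size is
configuration-dependent; 4 MiB is assumed here purely for the arithmetic,
and the real macro uses jemalloc's runtime chunksize_mask):

/* Illustration only; not jemalloc's actual definitions. */
#define CHUNKSIZE	((size_t)4 << 20)	/* assumed 4 MiB chunks */
#define CHUNK_CEILING(s)	(((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

/*
 * CHUNK_CEILING(1)       == 4194304: one active byte in an arena already
 *                          counts as a full chunk, so the estimate runs high.
 * CHUNK_CEILING(4194304) == 4194304: exact at chunk multiples.
 * CHUNK_CEILING(4194305) == 8388608: always rounds up, never down, so the
 *                          estimate can never be low.
 */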
diff --git a/include/jemalloc/internal/hash.h b/include/jemalloc/internal/hash.h
@@ -17,7 +17,7 @@
 uint64_t hash(const void *key, size_t len, uint64_t seed);
 #endif
 
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(HASH_C_))
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_))
 /*
  * The following hash function is based on MurmurHash64A(), placed into the
  * public domain by Austin Appleby.  See http://murmurhash.googlepages.com/ for
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -213,6 +213,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 #define PAGE_CEILING(s) \
 	(((s) + PAGE_MASK) & ~PAGE_MASK)
 
+#include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prn.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/stats.h"
@@ -237,6 +238,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *wcbopaque, const char *s);
 /******************************************************************************/
 #define JEMALLOC_H_STRUCTS
 
+#include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prn.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/stats.h"
@@ -352,6 +354,7 @@ int buferror(int errnum, char *buf, size_t buflen);
 void jemalloc_prefork(void);
 void jemalloc_postfork(void);
 
+#include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prn.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/stats.h"
@@ -376,6 +379,7 @@ void jemalloc_postfork(void);
 /******************************************************************************/
 #define JEMALLOC_H_INLINES
 
+#include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prn.h"
 #include "jemalloc/internal/ckh.h"
 #include "jemalloc/internal/stats.h"
diff --git a/include/jemalloc/internal/mb.h b/include/jemalloc/internal/mb.h
@@ -17,7 +17,7 @@
 void mb_write(void);
 #endif
 
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(MB_C_))
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
 #ifdef __i386__
 /*
  * According to the Intel Architecture Software Developer's Manual, current
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
@@ -49,7 +49,7 @@ void *rtree_get(rtree_t *rtree, uintptr_t key);
 bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
 #endif
 
-#if (defined(JEMALLOC_ENABLE_INLINE) || defined(RTREE_C_))
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
 #define RTREE_GET_GENERATE(f) \
 /* The least significant bits of the key are ignored. */ \
 JEMALLOC_INLINE void * \
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
@@ -154,6 +154,10 @@ struct chunk_stats_s {
 
 extern bool	opt_stats_print;
 
+#ifdef JEMALLOC_STATS
+extern size_t	stats_cactive;
+#endif
+
 char	*u2s(uint64_t x, unsigned base, char *s);
 #ifdef JEMALLOC_STATS
 void	malloc_cprintf(void (*write)(void *, const char *), void *cbopaque,
@@ -166,9 +170,38 @@ void stats_print(void (*write)(void *, const char *), void *cbopaque,
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
-#ifdef JEMALLOC_STATS
 #ifdef JEMALLOC_H_INLINES
+#ifdef JEMALLOC_STATS
 
+#ifndef JEMALLOC_ENABLE_INLINE
+size_t	stats_cactive_get(void);
+void	stats_cactive_add(size_t size);
+void	stats_cactive_sub(size_t size);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
+JEMALLOC_INLINE size_t
+stats_cactive_get(void)
+{
+
+	return (atomic_read_z(&stats_cactive));
+}
+
+JEMALLOC_INLINE void
+stats_cactive_add(size_t size)
+{
+
+	atomic_add_z(&stats_cactive, size);
+}
+
+JEMALLOC_INLINE void
+stats_cactive_sub(size_t size)
+{
+
+	atomic_sub_z(&stats_cactive, size);
+}
+#endif
+
-#endif /* JEMALLOC_H_INLINES */
 #endif /* JEMALLOC_STATS */
+#endif /* JEMALLOC_H_INLINES */
 /******************************************************************************/
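The new stats_cactive_get/add/sub accessors lean on size_t atomic operations
(atomic_read_z() and friends) supplied by the atomic.h header this commit
wires in; their definitions are not part of this diff. A rough sketch of what
such operations might look like, using GCC __sync builtins (hypothetical
stand-ins, not jemalloc's actual implementation):

static inline size_t
atomic_add_z(size_t *p, size_t x)
{
	return (__sync_add_and_fetch(p, x));	/* returns the new value */
}

static inline size_t
atomic_sub_z(size_t *p, size_t x)
{
	return (__sync_sub_and_fetch(p, x));
}

static inline size_t
atomic_read_z(size_t *p)
{
	return (__sync_add_and_fetch(p, 0));	/* atomic no-op add as a load */
}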
diff --git a/src/arena.c b/src/arena.c
@@ -315,6 +315,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 	size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
 	size_t flag_dirty;
 	arena_avail_tree_t *runs_avail;
+#ifdef JEMALLOC_STATS
+	size_t cactive_diff;
+#endif
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	old_ndirty = chunk->ndirty;
@@ -333,6 +336,13 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
 	rem_pages = total_pages - need_pages;
 
 	arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
+#ifdef JEMALLOC_STATS
+	/* Update stats_cactive if nactive is crossing a chunk multiple. */
+	cactive_diff = CHUNK_CEILING((arena->nactive + need_pages) <<
+	    PAGE_SHIFT) - CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
+	if (cactive_diff != 0)
+		stats_cactive_add(cactive_diff);
+#endif
 	arena->nactive += need_pages;
 
 	/* Keep track of trailing unused pages for later use. */
@@ -720,6 +730,9 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 			assert(pageind + npages <= chunk_npages);
 			if (mapelm->bits & CHUNK_MAP_DIRTY) {
 				size_t i;
+#ifdef JEMALLOC_STATS
+				size_t cactive_diff;
+#endif
 
 				arena_avail_tree_remove(
 				    &arena->runs_avail_dirty, mapelm);
@@ -742,6 +755,17 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 					    CHUNK_MAP_ALLOCATED;
 				}
 
+#ifdef JEMALLOC_STATS
+				/*
+				 * Update stats_cactive if nactive is crossing a
+				 * chunk multiple.
+				 */
+				cactive_diff = CHUNK_CEILING((arena->nactive +
+				    npages) << PAGE_SHIFT) -
+				    CHUNK_CEILING(arena->nactive << PAGE_SHIFT);
+				if (cactive_diff != 0)
+					stats_cactive_add(cactive_diff);
+#endif
 				arena->nactive += npages;
 				/* Append to list for later processing. */
 				ql_elm_new(mapelm, u.ql_link);
@@ -930,6 +954,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	arena_chunk_t *chunk;
 	size_t size, run_ind, run_pages, flag_dirty;
 	arena_avail_tree_t *runs_avail;
+#ifdef JEMALLOC_STATS
+	size_t cactive_diff;
+#endif
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
@@ -951,6 +978,13 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 		size = bin_info->run_size;
 	}
 	run_pages = (size >> PAGE_SHIFT);
+#ifdef JEMALLOC_STATS
+	/* Update stats_cactive if nactive is crossing a chunk multiple. */
+	cactive_diff = CHUNK_CEILING(arena->nactive << PAGE_SHIFT) -
+	    CHUNK_CEILING((arena->nactive - run_pages) << PAGE_SHIFT);
+	if (cactive_diff != 0)
+		stats_cactive_sub(cactive_diff);
+#endif
 	arena->nactive -= run_pages;
 
 	/*
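The cactive_diff bookkeeping above means the shared counter is touched only
when an arena's active page count crosses a chunk boundary, which keeps
atomic traffic on stats_cactive rare. A worked example, assuming (purely for
illustration) 4 KiB pages and 4 MiB chunks, i.e. 1024 pages per chunk:

/*
 * nactive = 1000 pages, need_pages = 20:
 *   CHUNK_CEILING(1020 pages) == CHUNK_CEILING(1000 pages) == 1 chunk
 *   -> cactive_diff == 0; no atomic operation at all.
 *
 * nactive = 1020 pages, need_pages = 20:
 *   CHUNK_CEILING(1040 pages) == 2 chunks, CHUNK_CEILING(1020 pages) == 1
 *   -> cactive_diff == 1 chunk (4 MiB); one stats_cactive_add() call.
 */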
diff --git a/src/ckh.c b/src/ckh.c
@@ -34,7 +34,7 @@
  * respectively.
  *
  ******************************************************************************/
-#define CKH_C_
+#define JEMALLOC_CKH_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
 /******************************************************************************/
diff --git a/src/ctl.c b/src/ctl.c
@@ -193,6 +193,7 @@ CTL_PROTO(stats_arenas_i_purged)
 #endif
 INDEX_PROTO(stats_arenas_i)
 #ifdef JEMALLOC_STATS
+CTL_PROTO(stats_cactive)
 CTL_PROTO(stats_allocated)
 CTL_PROTO(stats_active)
 CTL_PROTO(stats_mapped)
@@ -460,6 +461,7 @@ static const ctl_node_t stats_arenas_node[] = {
 
 static const ctl_node_t stats_node[] = {
 #ifdef JEMALLOC_STATS
+	{NAME("cactive"),	CTL(stats_cactive)},
 	{NAME("allocated"),	CTL(stats_allocated)},
 	{NAME("active"),	CTL(stats_active)},
 	{NAME("mapped"),	CTL(stats_mapped)},
@@ -1580,6 +1582,7 @@ RETURN:
 }
 
 #ifdef JEMALLOC_STATS
+CTL_RO_GEN(stats_cactive, &stats_cactive, size_t *)
 CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
 CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
 CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
diff --git a/src/hash.c b/src/hash.c
@@ -1,2 +1,2 @@
-#define HASH_C_
+#define JEMALLOC_HASH_C_
 #include "jemalloc/internal/jemalloc_internal.h"
diff --git a/src/huge.c b/src/huge.c
@@ -50,6 +50,7 @@ huge_malloc(size_t size, bool zero)
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
 #ifdef JEMALLOC_STATS
+	stats_cactive_add(csize);
 	huge_nmalloc++;
 	huge_allocated += csize;
 #endif
@@ -134,6 +135,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
 #ifdef JEMALLOC_STATS
+	stats_cactive_add(chunk_size);
 	huge_nmalloc++;
 	huge_allocated += chunk_size;
 #endif
@@ -278,6 +280,7 @@ huge_dalloc(void *ptr, bool unmap)
 	extent_tree_ad_remove(&huge, node);
 
 #ifdef JEMALLOC_STATS
+	stats_cactive_sub(node->size);
 	huge_ndalloc++;
 	huge_allocated -= node->size;
 #endif
diff --git a/src/jemalloc.c b/src/jemalloc.c
@@ -151,7 +151,7 @@ choose_arena_hard(void)
 		choose = 0;
 		first_null = narenas;
 		malloc_mutex_lock(&arenas_lock);
-		assert(arenas[i] != NULL);
+		assert(arenas[0] != NULL);
 		for (i = 1; i < narenas; i++) {
 			if (arenas[i] != NULL) {
 				/*
diff --git a/src/mb.c b/src/mb.c
@@ -1,2 +1,2 @@
-#define MB_C_
+#define JEMALLOC_MB_C_
 #include "jemalloc/internal/jemalloc_internal.h"
diff --git a/src/rtree.c b/src/rtree.c
@@ -1,4 +1,4 @@
-#define RTREE_C_
+#define JEMALLOC_RTREE_C_
 #include "jemalloc/internal/jemalloc_internal.h"
 
 rtree_t *
diff --git a/src/stats.c b/src/stats.c
@@ -39,6 +39,10 @@
 
 bool	opt_stats_print = false;
 
+#ifdef JEMALLOC_STATS
+size_t	stats_cactive = 0;
+#endif
+
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
@@ -673,21 +677,26 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 #ifdef JEMALLOC_STATS
 	{
 		int err;
-		size_t ssz;
+		size_t sszp, ssz;
+		size_t *cactive;
 		size_t allocated, active, mapped;
 		size_t chunks_current, chunks_high, swap_avail;
 		uint64_t chunks_total;
 		size_t huge_allocated;
 		uint64_t huge_nmalloc, huge_ndalloc;
 
+		sszp = sizeof(size_t *);
 		ssz = sizeof(size_t);
 
+		CTL_GET("stats.cactive", &cactive, size_t *);
 		CTL_GET("stats.allocated", &allocated, size_t);
 		CTL_GET("stats.active", &active, size_t);
 		CTL_GET("stats.mapped", &mapped, size_t);
 		malloc_cprintf(write_cb, cbopaque,
-		    "Allocated: %zu, active: %zu, mapped: %zu\n", allocated,
-		    active, mapped);
+		    "Allocated: %zu, active: %zu, mapped: %zu\n",
+		    allocated, active, mapped);
+		malloc_cprintf(write_cb, cbopaque,
+		    "Current active ceiling: %zu\n", atomic_read_z(cactive));
 
 		/* Print chunk stats. */
 		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);