Implement mallctl{nametomib,bymib}().

Replace chunk stats code that was missing locking; this fixes a race
condition that could corrupt chunk statistics.

Convert malloc_stats_print() to use mallctl*().

Add a missing semicolon in the DSS code.

Convert malloc_tcache_flush() to a mallctl.

Convert malloc_swap_enable() to a set of mallctl's.
This commit is contained in:
Jason Evans 2010-01-27 13:10:55 -08:00
parent fbbb624fc1
commit 3c2343518c
17 changed files with 2654 additions and 470 deletions

View File

@ -40,10 +40,10 @@ CHDRS := @objroot@src/jemalloc@install_suffix@.h \
CSRCS := @srcroot@src/jemalloc.c @srcroot@src/jemalloc_arena.c \ CSRCS := @srcroot@src/jemalloc.c @srcroot@src/jemalloc_arena.c \
@srcroot@src/jemalloc_base.c @srcroot@src/jemalloc_chunk.c \ @srcroot@src/jemalloc_base.c @srcroot@src/jemalloc_chunk.c \
@srcroot@src/jemalloc_chunk_dss.c @srcroot@src/jemalloc_chunk_mmap.c \ @srcroot@src/jemalloc_chunk_dss.c @srcroot@src/jemalloc_chunk_mmap.c \
@srcroot@src/jemalloc_chunk_swap.c @srcroot@src/jemalloc_extent.c \ @srcroot@src/jemalloc_chunk_swap.c @srcroot@src/jemalloc_ctl.c \
@srcroot@src/jemalloc_huge.c @srcroot@src/jemalloc_mutex.c \ @srcroot@src/jemalloc_extent.c @srcroot@src/jemalloc_huge.c \
@srcroot@src/jemalloc_stats.c @srcroot@src/jemalloc_tcache.c \ @srcroot@src/jemalloc_mutex.c @srcroot@src/jemalloc_stats.c \
@srcroot@src/jemalloc_trace.c @srcroot@src/jemalloc_tcache.c @srcroot@src/jemalloc_trace.c
DSOS := @objroot@lib/libjemalloc@install_suffix@.so.$(REV) \ DSOS := @objroot@lib/libjemalloc@install_suffix@.so.$(REV) \
@objroot@lib/libjemalloc@install_suffix@.so \ @objroot@lib/libjemalloc@install_suffix@.so \
@objroot@lib/libjemalloc@install_suffix@_pic.a @objroot@lib/libjemalloc@install_suffix@_pic.a

View File

@ -38,7 +38,7 @@
.\" @(#)malloc.3 8.1 (Berkeley) 6/4/93 .\" @(#)malloc.3 8.1 (Berkeley) 6/4/93
.\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $ .\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $
.\" .\"
.Dd January 23, 2010 .Dd January 27, 2010
.Dt JEMALLOC 3 .Dt JEMALLOC 3
.Os .Os
.Sh NAME .Sh NAME
@ -48,12 +48,13 @@
.Nm @jemalloc_prefix@realloc , .Nm @jemalloc_prefix@realloc ,
.Nm @jemalloc_prefix@free , .Nm @jemalloc_prefix@free ,
.Nm @jemalloc_prefix@malloc_usable_size , .Nm @jemalloc_prefix@malloc_usable_size ,
@roff_swap@.Nm @jemalloc_prefix@malloc_swap_enable , .Nm @jemalloc_prefix@malloc_stats_print ,
@roff_tcache@.Nm @jemalloc_prefix@malloc_tcache_flush , .Nm @jemalloc_prefix@mallctl ,
.Nm @jemalloc_prefix@malloc_stats_print .Nm @jemalloc_prefix@mallctlnametomib ,
.Nm @jemalloc_prefix@mallctlbymib
.Nd general purpose memory allocation functions .Nd general purpose memory allocation functions
.Sh LIBRARY .Sh LIBRARY
.Lb libjemalloc@install_suffix@ .Sy libjemalloc@install_suffix@
.Sh SYNOPSIS .Sh SYNOPSIS
.In stdlib.h .In stdlib.h
.In jemalloc@install_suffix@.h .In jemalloc@install_suffix@.h
@ -69,18 +70,18 @@
.Fn @jemalloc_prefix@free "void *ptr" .Fn @jemalloc_prefix@free "void *ptr"
.Ft size_t .Ft size_t
.Fn @jemalloc_prefix@malloc_usable_size "const void *ptr" .Fn @jemalloc_prefix@malloc_usable_size "const void *ptr"
@roff_swap@.Ft int
@roff_swap@.Fn @jemalloc_prefix@malloc_swap_enable "const int *fds" "unsigned nfds" "int prezeroed"
@roff_tcache@.Ft void
@roff_tcache@.Fn @jemalloc_prefix@malloc_tcache_flush "void"
.Ft void .Ft void
.Fn @jemalloc_prefix@malloc_stats_print "void (*write4)(void *" "const char *" "const char *" "const char *" "const char *)" "const char *opts" .Fn @jemalloc_prefix@malloc_stats_print "void (*write4)(void *" "const char *" "const char *" "const char *" "const char *)" "const char *opts"
.Ft int
.Fn @jemalloc_prefix@mallctl "const char *name" "void *oldp" "size_t *oldlenp" "void *newp" "size_t newlen"
.Ft int
.Fn @jemalloc_prefix@mallctlnametomib "const char *name" "int *mibp" "size_t *miblenp"
.Ft int
.Fn @jemalloc_prefix@mallctlbymib "const size_t *mib" "size_t miblen" "void *oldp" "size_t *oldlenp" "void *newp" "size_t newlen"
.Ft const char * .Ft const char *
.Va @jemalloc_prefix@malloc_options ; .Va @jemalloc_prefix@malloc_options ;
.Ft void .Ft void
.Fo \*(lp*@jemalloc_prefix@malloc_message\*(rp .Fn \*(lp*@jemalloc_prefix@malloc_message\*(rp "void *w4opaque" "const char *p1" "const char *p2" "const char *p3" "const char *p4"
.Fa "void *w4opaque" "const char *p1" "const char *p2" "const char *p3" "const char *p4"
.Fc
.Sh DESCRIPTION .Sh DESCRIPTION
The The
.Fn @jemalloc_prefix@malloc .Fn @jemalloc_prefix@malloc
@ -173,39 +174,6 @@ Any discrepancy between the requested allocation size and the size reported by
.Fn @jemalloc_prefix@malloc_usable_size .Fn @jemalloc_prefix@malloc_usable_size
should not be depended on, since such behavior is entirely should not be depended on, since such behavior is entirely
implementation-dependent. implementation-dependent.
@roff_swap@.Pp
@roff_swap@The
@roff_swap@.Fn @jemalloc_prefix@malloc_swap_enable
@roff_swap@function opens and contiguously maps a list of
@roff_swap@.Fa nfds
@roff_swap@file descriptors pointed to by
@roff_swap@.Fa fds
@roff_swap@via
@roff_swap@.Xr mmap 2 .
@roff_swap@The resulting virtual memory region is preferred over anonymous
@roff_swap@.Xr mmap 2
@roff_swap@@roff_dss@and
@roff_swap@@roff_dss@.Xr sbrk 2
@roff_swap@memory.
@roff_swap@Note that if a file's size is not a multiple of the page size, it is
@roff_swap@automatically truncated to the nearest page size multiple.
@roff_swap@If
@roff_swap@.Fa prezeroed
@roff_swap@is non-zero, the allocator assumes that the file(s) contain nothing
@roff_swap@but nil bytes.
@roff_swap@If this assumption is violated, allocator behavior is undefined.
@roff_tcache@.Pp
@roff_tcache@The
@roff_tcache@.Fn @jemalloc_prefix@malloc_tcache_flush
@roff_tcache@function releases all cached objects and internal data structures
@roff_tcache@associated with the calling thread's thread-specific cache.
@roff_tcache@Ordinarily, this function need not be called, since automatic
@roff_tcache@periodic incremental garbage collection occurs, and the thread
@roff_tcache@cache is automatically discarded when a thread exits.
@roff_tcache@However, garbage collection is triggered by allocation activity,
@roff_tcache@so it is possible for a thread that stops allocating/deallocating
@roff_tcache@to retain its cache indefinitely, in which case the developer may
@roff_tcache@find this function useful.
.Pp .Pp
The The
.Fn @jemalloc_prefix@malloc_stats_print .Fn @jemalloc_prefix@malloc_stats_print
@ -228,6 +196,12 @@ during execution can be omitted by specifying
as a character within the as a character within the
.Fa opts .Fa opts
string. string.
Note that
.Fn @jemalloc_prefix@malloc_stats_print
uses the
.Fn @jemalloc_prefix@mallctl*
functions internally, so inconsistent statistics can be reported if multiple
threads use these functions simultaneously.
@roff_stats@.Dq m @roff_stats@.Dq m
@roff_stats@and @roff_stats@and
@roff_stats@.Dq a @roff_stats@.Dq a
@ -242,6 +216,79 @@ Unrecognized characters are silently ignored.
@roff_tcache@Note that thread caching may prevent some statistics from being @roff_tcache@Note that thread caching may prevent some statistics from being
@roff_tcache@completely up to date, since extra locking would be required to @roff_tcache@completely up to date, since extra locking would be required to
@roff_tcache@merge counters that track thread cache operations. @roff_tcache@merge counters that track thread cache operations.
.Pp
The
.Fn @jemalloc_prefix@mallctl
function provides a general interface for introspecting the memory allocator,
as well as setting modifiable parameters and triggering actions.
The period-separated
.Fa name
argument specifies a location in a tree-structured namespace; see the
.Sx "MALLCTL NAMESPACE"
section for documentation on the tree contents.
To read a value, pass a pointer via
.Fa oldp
to adequate space to contain the value, and a pointer to its length via
.Fa oldlenp ;
otherwise pass
.Dv NULL
and
.Dv NULL .
Similarly, to write a value, pass a pointer to the value via
.Fa newp ,
and its length via
.Fa newlen ;
otherwise pass
.Dv NULL
and 0.
.Pp
The
.Fn @jemalloc_prefix@mallctlnametomib
function provides a way to avoid repeated name lookups for applications that
repeatedly query the same portion of the namespace, by translating a name to a
.Dq Management Information Base
(MIB) that can be passed repeatedly to
.Fn @jemalloc_prefix@mallctlbymib .
Upon successful return from
.Fn @jemalloc_prefix@mallctlnametomib ,
.Fa mibp
contains an array of
.Fa *miblenp
integers, where
.Fa *miblenp
is the lesser of the number of components in
.Fa name
and the input value of
.Fa *miblenp .
Thus it is possible to pass a
.Fa *miblenp
that is smaller than the number of period-separated name components, which
results in a partial MIB that can be used as the basis for constructing a
complete MIB.
For name components that are integers (e.g. the 2 in
.Qq arenas.bin.2.size ) ,
the corresponding MIB component will always be that integer.
Therefore, it is legitimate to construct code like the following:
.Pp
.Bd -literal -offset indent -compact
unsigned nbins, i;
int mib[4];
size_t len, miblen;
len = sizeof(nbins);
@jemalloc_prefix@mallctl("arenas.nbins", &nbins, &len, NULL, 0);
miblen = 4;
@jemalloc_prefix@mallctlnametomib("arenas.bin.0.size", mib, &miblen);
for (i = 0; i < nbins; i++) {
size_t bin_size;
mib[2] = i;
len = sizeof(bin_size);
@jemalloc_prefix@mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
/* Do something with bin_size... */
}
.Ed
.Sh TUNING .Sh TUNING
Once, when the first call is made to one of these memory allocation Once, when the first call is made to one of these memory allocation
routines, various flags will be set or reset, which affects the routines, various flags will be set or reset, which affects the
@ -347,8 +394,8 @@ times the number of CPUs, or one if there is a single CPU.
@roff_swap@@roff_dss@.Xr sbrk 2 @roff_swap@@roff_dss@.Xr sbrk 2
@roff_swap@for virtual memory allocation. @roff_swap@for virtual memory allocation.
@roff_swap@In order for overcommit to be disabled, the @roff_swap@In order for overcommit to be disabled, the
@roff_swap@.Fn malloc_swap_enable @roff_swap@.Dq swap.fds
@roff_swap@function must have been successfully called. @roff_swap@mallctl must have been successfully written to.
@roff_swap@This option is enabled by default. @roff_swap@This option is enabled by default.
.It P .It P
The The
@ -503,7 +550,7 @@ no more than the minimum cacheline-multiple size class (see the
option) are rounded up to the nearest multiple of the option) are rounded up to the nearest multiple of the
@roff_tiny@quantum. @roff_tiny@quantum.
@roff_no_tiny@quantum (8 or 16, depending on architecture). @roff_no_tiny@quantum (8 or 16, depending on architecture).
Allocation requests that are more than the minumum cacheline-multiple size Allocation requests that are more than the minimum cacheline-multiple size
class, but no more than the minimum subpage-multiple size class (see the class, but no more than the minimum subpage-multiple size class (see the
.Dq C .Dq C
option) are rounded up to the nearest multiple of the cacheline size (64). option) are rounded up to the nearest multiple of the cacheline size (64).
@ -528,6 +575,592 @@ multi-threaded applications.
If you need to assure that allocations do not suffer from cacheline sharing, If you need to assure that allocations do not suffer from cacheline sharing,
round your allocation requests up to the nearest multiple of the cacheline round your allocation requests up to the nearest multiple of the cacheline
size. size.
.Sh MALLCTL NAMESPACE
The following names are defined in the namespace accessible via the
.Fn mallctl*
functions.
Value types are specified in parentheses, and their readable/writable statuses
are encoded as rw, r-, -w, or --.
A name element encoded as <i> or <j> indicates an integer component, where the
integer varies from 0 to some upper value that must be determined via
introspection.
@roff_stats@In the case of
@roff_stats@.Dq stats.arenas.<i>.* ,
@roff_stats@<i> equal to
@roff_stats@.Dq arenas.narenas
@roff_stats@can be used to access the summation of statistics from all arenas.
.Bl -ohang
.\"-----------------------------------------------------------------------------
.It Sy "epoch (uint64_t) rw"
.Bd -ragged -offset indent -compact
If a value is passed in, refresh the data from which the
.Fn mallctl*
functions report values, and increment the epoch.
Return the current epoch.
This is useful for detecting whether another thread caused a refresh.
.Ed
.\"-----------------------------------------------------------------------------
@roff_tcache@.It Sy "tcache.flush (void) --"
@roff_tcache@.Bd -ragged -offset indent -compact
@roff_tcache@Flush calling thread's tcache.
@roff_tcache@This interface releases all cached objects and internal data
@roff_tcache@structures associated with the calling thread's thread-specific
@roff_tcache@cache.
@roff_tcache@Ordinarily, this interface need not be called, since automatic
@roff_tcache@periodic incremental garbage collection occurs, and the thread
@roff_tcache@cache is automatically discarded when a thread exits.
@roff_tcache@However, garbage collection is triggered by allocation activity,
@roff_tcache@so it is possible for a thread that stops allocating/deallocating
@roff_tcache@to retain its cache indefinitely, in which case the developer may
@roff_tcache@find manual flushing useful.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.debug (bool) r-"
.Bd -ragged -offset indent -compact
--enable-debug was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.dss (bool) r-"
.Bd -ragged -offset indent -compact
--enable-dss was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.dynamic_page_shift (bool) r-"
.Bd -ragged -offset indent -compact
--enable-dynamic-page-shift was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.fill (bool) r-"
.Bd -ragged -offset indent -compact
--enable-fill was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.lazy_lock (bool) r-"
.Bd -ragged -offset indent -compact
--enable-lazy-lock was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.stats (bool) r-"
.Bd -ragged -offset indent -compact
--enable-stats was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.swap (bool) r-"
.Bd -ragged -offset indent -compact
--enable-swap was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.sysv (bool) r-"
.Bd -ragged -offset indent -compact
--enable-sysv was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.tcache (bool) r-"
.Bd -ragged -offset indent -compact
--disable-tcache was not specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.tiny (bool) r-"
.Bd -ragged -offset indent -compact
--disable-tiny was not specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.tls (bool) r-"
.Bd -ragged -offset indent -compact
--disable-tls was not specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.trace (bool) r-"
.Bd -ragged -offset indent -compact
--enable-trace was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "config.xmalloc (bool) r-"
.Bd -ragged -offset indent -compact
--enable-xmalloc was specified during build configuration.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.abort (bool) r-"
.Bd -ragged -offset indent -compact
See the
.Dq A
option.
.Ed
.\"-----------------------------------------------------------------------------
@roff_fill@.It Sy "opt.junk (bool) r-"
@roff_fill@.Bd -ragged -offset indent -compact
@roff_fill@See the
@roff_fill@.Dq J
@roff_fill@option.
@roff_fill@.Ed
.\"-----------------------------------------------------------------------------
@roff_fill@.It Sy "opt.zero (bool) r-"
@roff_fill@.Bd -ragged -offset indent -compact
@roff_fill@See the
@roff_fill@.Dq Z
@roff_fill@option.
@roff_fill@.Ed
.\"-----------------------------------------------------------------------------
@roff_xmalloc@.It Sy "opt.xmalloc (bool) r-"
@roff_xmalloc@.Bd -ragged -offset indent -compact
@roff_xmalloc@See the
@roff_xmalloc@.Dq X
@roff_xmalloc@option.
@roff_xmalloc@.Ed
.\"-----------------------------------------------------------------------------
@roff_tcache@.It Sy "opt.lg_tcache_nslots (size_t) r-"
@roff_tcache@.Bd -ragged -offset indent -compact
@roff_tcache@See the
@roff_tcache@.Dq H
@roff_tcache@option.
@roff_tcache@.Ed
.\"-----------------------------------------------------------------------------
@roff_tcache@.It Sy "opt.lg_tcache_gc_sweep (ssize_t) r-"
@roff_tcache@.Bd -ragged -offset indent -compact
@roff_tcache@See the
@roff_tcache@.Dq G
@roff_tcache@option.
@roff_tcache@.Ed
.\"-----------------------------------------------------------------------------
@roff_tcache@.It Sy "opt.tcache_sort (bool) r-"
@roff_tcache@.Bd -ragged -offset indent -compact
@roff_tcache@See the
@roff_tcache@.Dq S
@roff_tcache@option.
@roff_tcache@.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.stats_print (bool) r-"
.Bd -ragged -offset indent -compact
See the
.Dq P
option.
.Ed
.\"-----------------------------------------------------------------------------
@roff_trace@.It Sy "opt.trace (bool) r-"
@roff_trace@.Bd -ragged -offset indent -compact
@roff_trace@See the
@roff_trace@.Dq T
@roff_trace@option.
@roff_trace@.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.lg_qspace_max (size_t) r-"
.Bd -ragged -offset indent -compact
See the
.Dq Q
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.lg_cspace_max (size_t) r-"
.Bd -ragged -offset indent -compact
See the
.Dq C
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.lg_medium_max (size_t) r-"
.Bd -ragged -offset indent -compact
See the
.Dq M
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.lg_dirty_mult (ssize_t) r-"
.Bd -ragged -offset indent -compact
See the
.Dq D
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.lg_chunk (size_t) r-"
.Bd -ragged -offset indent -compact
See the
.Dq K
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "opt.overcommit (bool) r-"
.Bd -ragged -offset indent -compact
See the
.Dq O
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.narenas (unsigned) r-"
.Bd -ragged -offset indent -compact
Maximum number of arenas.
See the
.Dq N
option.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.initialized (bool *) r-"
.Bd -ragged -offset indent -compact
An array of arenas.narenas booleans.
Each boolean indicates whether the corresponding arena is initialized.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.quantum (size_t) r-"
.Bd -ragged -offset indent -compact
Quantum size.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.cacheline (size_t) r-"
.Bd -ragged -offset indent -compact
Assumed cacheline size.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.subpage (size_t) r-"
.Bd -ragged -offset indent -compact
Subpage size class interval.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.pagesize (size_t) r-"
.Bd -ragged -offset indent -compact
Page size.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.medium (size_t) r-"
.Bd -ragged -offset indent -compact
Medium size class interval.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.chunksize (size_t) r-"
.Bd -ragged -offset indent -compact
Chunk size.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.tspace_min (size_t) r-"
.Bd -ragged -offset indent -compact
Minimum tiny size class.
Tiny size classes are powers of two.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.tspace_max (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum tiny size class.
Tiny size classes are powers of two.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.qspace_min (size_t) r-"
.Bd -ragged -offset indent -compact
Minimum quantum-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.qspace_max (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum quantum-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.cspace_min (size_t) r-"
.Bd -ragged -offset indent -compact
Minimum cacheline-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.cspace_max (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum cacheline-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.sspace_min (size_t) r-"
.Bd -ragged -offset indent -compact
Minimum subpage-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.sspace_max (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum subpage-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.medium_min (size_t) r-"
.Bd -ragged -offset indent -compact
Minimum medium-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.medium_max (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum medium-spaced size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.ntbins (unsigned) r-"
.Bd -ragged -offset indent -compact
Number of tiny bin size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.nqbins (unsigned) r-"
.Bd -ragged -offset indent -compact
Number of quantum-spaced bin size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.ncbins (unsigned) r-"
.Bd -ragged -offset indent -compact
Number of cacheline-spaced bin size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.nsbins (unsigned) r-"
.Bd -ragged -offset indent -compact
Number of subpage-spaced bin size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.nmbins (unsigned) r-"
.Bd -ragged -offset indent -compact
Number of medium-spaced bin size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.nbins (unsigned) r-"
.Bd -ragged -offset indent -compact
Total number of bin size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.bin.<i>.size (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum size supported by size class.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.bin.<i>.nregs (uint32_t) r-"
.Bd -ragged -offset indent -compact
Number of regions per page run.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.bin.<i>.run_size (size_t) r-"
.Bd -ragged -offset indent -compact
Number of bytes per page run.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.nlruns (size_t) r-"
.Bd -ragged -offset indent -compact
Total number of large size classes.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "arenas.lrun.<i>.size (size_t) r-"
.Bd -ragged -offset indent -compact
Maximum size supported by this large size class.
.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.allocated (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Total number of bytes allocated by the application.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.active (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Total number of bytes in active pages allocated by the application.
@roff_stats@This is a multiple of the page size, and is larger than
@roff_stats@.Dq stats.allocated .
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.mapped (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Total number of bytes in chunks mapped on behalf of the application.
@roff_stats@This is a multiple of the chunk size, and is at least as large as
@roff_stats@.Dq stats.active .
@roff_stats@@roff_swap@This does not include inactive chunks backed by swap
@roff_stats@@roff_swap@files.
@roff_stats@@roff_dss@This does not include inactive chunks embedded in the DSS.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.chunks.current (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Total number of chunks actively mapped on behalf of the application.
@roff_stats@@roff_swap@This does not include inactive chunks backed by swap
@roff_stats@@roff_swap@files.
@roff_stats@@roff_dss@This does not include inactive chunks embedded in the DSS.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.chunks.total (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of chunks allocated.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.chunks.high (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Maximum number of active chunks at any time thus far.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.huge.allocated (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of bytes currently allocated by huge objects.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.huge.nmalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of huge allocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.huge.ndalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of huge deallocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
.It Sy "stats.arenas.<i>.pactive (size_t) r-"
.Bd -ragged -offset indent -compact
Number of pages in active runs.
.Ed
.\"-----------------------------------------------------------------------------
.It Sy "stats.arenas.<i>.pdirty (size_t) r-"
.Bd -ragged -offset indent -compact
Number of pages within unused runs that are potentially dirty, and for which
.Fn madvise "..." "MADV_DONTNEED"
has not been called.
.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.mapped (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of mapped bytes.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.npurge (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of dirty page purge sweeps performed.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.nmadvise (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of
@roff_stats@.Fn madvise "..." "MADV_DONTNEED"
@roff_stats@calls made to purge dirty pages.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.npurged (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of pages purged.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.small.allocated (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of bytes currently allocated by small objects.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.small.nmalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of small allocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.small.ndalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of small deallocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.medium.allocated (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of bytes currently allocated by medium objects.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.medium.nmalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of medium allocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.medium.ndalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of medium deallocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.large.allocated (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Number of bytes currently allocated by large objects.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.large.nmalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of large allocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.large.ndalloc (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of large deallocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of allocation requests.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@@roff_tcache@.It Sy "stats.arenas.<i>.bins.<j>.nfills (uint64_t) r-"
@roff_stats@@roff_tcache@.Bd -ragged -offset indent -compact
@roff_stats@@roff_tcache@Cumulative number of tcache fills.
@roff_stats@@roff_tcache@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@@roff_tcache@.It Sy "stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r-"
@roff_stats@@roff_tcache@.Bd -ragged -offset indent -compact
@roff_stats@@roff_tcache@Cumulative number of tcache flushes.
@roff_stats@@roff_tcache@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nruns (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of runs created.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nreruns (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of times the current run from which to allocate
@roff_stats@changed.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.highruns (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Maximum number of runs at any time thus far.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.curruns (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Current number of runs.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.nrequests (uint64_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Cumulative number of allocation requests for this size class.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.highruns (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Maximum number of runs at any time thus far for this size class.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.curruns (size_t) r-"
@roff_stats@.Bd -ragged -offset indent -compact
@roff_stats@Current number of runs for this size class.
@roff_stats@.Ed
.\"-----------------------------------------------------------------------------
@roff_stats@@roff_swap@.It Sy "swap.avail (size_t) r-"
@roff_stats@@roff_swap@.Bd -ragged -offset indent -compact
@roff_stats@@roff_swap@Number of swap file bytes that are currently not
@roff_stats@@roff_swap@associated with any chunk (i.e. mapped, but otherwise
@roff_stats@@roff_swap@completely unmanaged).
@roff_stats@@roff_swap@.Ed
.\"-----------------------------------------------------------------------------
@roff_swap@.It Sy "swap.prezeroed (bool) rw"
@roff_swap@.Bd -ragged -offset indent -compact
@roff_swap@If true, the allocator assumes that the swap file(s) contain nothing
@roff_swap@but nil bytes.
@roff_swap@If this assumption is violated, allocator behavior is undefined.
@roff_swap@This value becomes read-only after
@roff_swap@.Dq swap.fds
@roff_swap@is successfully written to.
@roff_swap@.Ed
.\"-----------------------------------------------------------------------------
@roff_swap@.It Sy "swap.nfds (size_t) r-"
@roff_swap@.Bd -ragged -offset indent -compact
@roff_swap@Number of file descriptors in use for swap.
@roff_swap@.Ed
.\"-----------------------------------------------------------------------------
@roff_swap@.It Sy "swap.fds (int *) rw"
@roff_swap@.Bd -ragged -offset indent -compact
@roff_swap@When written to, the files associated with the specified file
@roff_swap@descriptors are contiguously mapped via
@roff_swap@.Xr mmap 2 .
@roff_swap@The resulting virtual memory region is preferred over anonymous
@roff_swap@.Xr mmap 2
@roff_swap@@roff_dss@and
@roff_swap@@roff_dss@.Xr sbrk 2
@roff_swap@memory.
@roff_swap@Note that if a file's size is not a multiple of the page size, it is
@roff_swap@automatically truncated down to the nearest page size multiple.
@roff_swap@See the
@roff_swap@.Dq swap.prezeroed
@roff_swap@interface for specifying that the files are pre-zeroed.
@roff_swap@.Ed
.\"-----------------------------------------------------------------------------
.El
.Sh DEBUGGING MALLOC PROBLEMS .Sh DEBUGGING MALLOC PROBLEMS
The first thing to do is to set the The first thing to do is to set the
.Dq A .Dq A
@ -646,11 +1279,43 @@ The
.Fn @jemalloc_prefix@malloc_usable_size .Fn @jemalloc_prefix@malloc_usable_size
function returns the usable size of the allocation pointed to by function returns the usable size of the allocation pointed to by
.Fa ptr . .Fa ptr .
@roff_swap@.Pp .Pp
@roff_swap@The The
@roff_swap@.Fn @jemalloc_prefix@malloc_swap_enable .Fn @jemalloc_prefix@mallctl ,
@roff_swap@function returns the value 0 if successful; otherwise it returns a .Fn @jemalloc_prefix@mallctlnametomib ,
@roff_swap@non-zero value. and
.Fn @jemalloc_prefix@mallctlbymib
functions return 0 on success; otherwise they return an error value.
The functions will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
.Fa newp
is
.Dv non-NULL ,
and
.Fa newlen
is too large or too small.
Alternatively,
.Fa *oldlenp
is too large or too small; in this case as much data as possible are read
despite the error.
.It Bq Er ENOMEM
.Fa *oldlenp
is too short to hold the requested value.
.It Bq Er ENOENT
.Fa name
or
.Fa mib
specifies an unknown/invalid value.
.It Bq Er EPERM
Attempt to read or write void value, or attempt to write read-only value.
.It Bq Er EAGAIN
A memory allocation failure occurred.
.It Bq Er EFAULT
An interface with side effects failed in some way not directly related to
.Fn mallctl*
read/write processing.
.El
.Sh ENVIRONMENT .Sh ENVIRONMENT
The following environment variables affect the execution of the allocation The following environment variables affect the execution of the allocation
functions: functions:

View File

@ -391,6 +391,8 @@ extern size_t medium_max;
extern size_t lg_mspace; extern size_t lg_mspace;
extern size_t mspace_mask; extern size_t mspace_mask;
#define nlclasses ((chunksize - PAGE_SIZE) >> PAGE_SHIFT)
#ifdef JEMALLOC_TCACHE #ifdef JEMALLOC_TCACHE
void arena_tcache_fill(arena_t *arena, tcache_bin_t *tbin, size_t binind); void arena_tcache_fill(arena_t *arena, tcache_bin_t *tbin, size_t binind);
#endif #endif
@ -407,14 +409,6 @@ void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats); malloc_large_stats_t *lstats);
void arena_stats_mprint(arena_t *arena, size_t nactive, size_t ndirty,
const arena_stats_t *astats, const malloc_bin_stats_t *bstats,
const malloc_large_stats_t *lstats, bool bins, bool large,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque);
void arena_stats_print(arena_t *arena, bool bins, bool large,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque);
#endif #endif
void *arena_ralloc(void *ptr, size_t size, size_t oldsize); void *arena_ralloc(void *ptr, size_t size, size_t oldsize);
bool arena_new(arena_t *arena, unsigned ind); bool arena_new(arena_t *arena, unsigned ind);

View File

@ -33,6 +33,8 @@ extern bool opt_overcommit;
#endif #endif
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
/* Chunk statistics. */ /* Chunk statistics. */
extern chunk_stats_t stats_chunks; extern chunk_stats_t stats_chunks;
#endif #endif

View File

@ -12,6 +12,9 @@
extern malloc_mutex_t swap_mtx; extern malloc_mutex_t swap_mtx;
extern bool swap_enabled; extern bool swap_enabled;
extern bool swap_prezeroed;
extern size_t swap_nfds;
extern int *swap_fds;
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
extern size_t swap_avail; extern size_t swap_avail;
#endif #endif

View File

@ -0,0 +1,108 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
typedef struct ctl_node_s ctl_node_t;
typedef struct ctl_arena_stats_s ctl_arena_stats_t;
typedef struct ctl_stats_s ctl_stats_t;
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
/*
 * Node in the mallctl* namespace tree.  A node is either "named" (a fixed
 * set of children, each with a string name) or "indexed" (children are
 * selected by a numeric MIB component, e.g. the <i> in "stats.arenas.<i>");
 * the `named` flag discriminates the union.
 */
struct ctl_node_s {
	/* true --> u.named is valid; false --> u.indexed is valid. */
	bool named;
	union {
		struct {
			const char *name;
			/* If (nchildren == 0), this is a terminal node. */
			unsigned nchildren;
			const ctl_node_t *children;
		} named;
		struct {
			/*
			 * Resolves a numeric MIB component to a child node.
			 * Args appear to be (mib, miblen, index) — TODO
			 * confirm against jemalloc_ctl.c.
			 */
			const ctl_node_t *(*index)(const size_t *, size_t,
			    size_t);
		} indexed;
	} u;
	/*
	 * Control handler for this node; the parameter list matches
	 * ctl_bymib(): (mib, miblen, oldp, oldlenp, newp, newlen).
	 */
	int (*ctl)(const size_t *, size_t, void *, size_t *, void *,
	    size_t);
};
/*
 * Snapshot of a single arena's statistics, as published through the
 * "stats.arenas.<i>.*" mallctl namespace.
 */
struct ctl_arena_stats_s {
	/* false while this slot holds no snapshot — TODO confirm in ctl.c. */
	bool initialized;
	size_t pactive;	/* Active pages (presumably arena nactive). */
	size_t pdirty;	/* Dirty pages (presumably arena ndirty). */
#ifdef JEMALLOC_STATS
	arena_stats_t astats;
	malloc_bin_stats_t *bstats;	/* nbins elements. */
	malloc_large_stats_t *lstats;	/* nlclasses elements. */
#endif
};
/*
 * Top-level statistics snapshot published through the "stats.*" mallctl
 * namespace.  The trailing comments name the global counter each field
 * mirrors at refresh time.
 */
struct ctl_stats_s {
#ifdef JEMALLOC_STATS
	size_t allocated;
	size_t active;
	size_t mapped;
	struct {
		size_t current;		/* stats_chunks.curchunks */
		uint64_t total;		/* stats_chunks.nchunks */
		size_t high;		/* stats_chunks.highchunks */
	} chunks;
	struct {
		size_t allocated;	/* huge_allocated */
		uint64_t nmalloc;	/* huge_nmalloc */
		uint64_t ndalloc;	/* huge_ndalloc */
	} huge;
#endif
	/*
	 * (narenas + 1) elements; the purpose of the extra element is not
	 * visible here — presumably merged totals, confirm in jemalloc_ctl.c.
	 */
	ctl_arena_stats_t *arenas;
#ifdef JEMALLOC_SWAP
	size_t swap_avail;		/* Mirrors the swap_avail global. */
#endif
};
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen);
int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
bool ctl_boot(void);
/*
 * Wrappers around the public mallctl*() entry points that treat failure as
 * a fatal internal error: a nonzero return prints a diagnostic via
 * malloc_write4() and calls abort().  Intended only for internal callers
 * whose arguments are known to be valid.
 */
#define xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
	if (mallctl(name, oldp, oldlenp, newp, newlen) != 0) {		\
		malloc_write4("<jemalloc>: Invalid xmallctl(\"", name,	\
		    "\", ...) call\n", "");				\
		abort();						\
	}								\
} while (0)

#define xmallctlnametomib(name, mibp, miblenp) do {			\
	if (mallctlnametomib(name, mibp, miblenp) != 0) {		\
		malloc_write4(						\
		    "<jemalloc>: Invalid xmallctlnametomib(\"", name,	\
		    "\", ...) call\n", "");				\
		abort();						\
	}								\
} while (0)

/* No name is available here, so the diagnostic pads with empty strings. */
#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do {	\
	if (mallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen)	\
	    != 0) {							\
		malloc_write4(						\
		    "<jemalloc>: Invalid xmallctlbymib() call\n", "",	\
		    "", "");						\
		abort();						\
	}								\
} while (0)
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/

View File

@ -170,6 +170,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *w4opaque, const char *p1,
(((s) + PAGE_MASK) & ~PAGE_MASK) (((s) + PAGE_MASK) & ~PAGE_MASK)
#include "internal/jemalloc_stats.h" #include "internal/jemalloc_stats.h"
#include "internal/jemalloc_ctl.h"
#include "internal/jemalloc_mutex.h" #include "internal/jemalloc_mutex.h"
#include "internal/jemalloc_extent.h" #include "internal/jemalloc_extent.h"
#include "internal/jemalloc_arena.h" #include "internal/jemalloc_arena.h"
@ -184,6 +185,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *w4opaque, const char *p1,
#define JEMALLOC_H_STRUCTS #define JEMALLOC_H_STRUCTS
#include "internal/jemalloc_stats.h" #include "internal/jemalloc_stats.h"
#include "internal/jemalloc_ctl.h"
#include "internal/jemalloc_mutex.h" #include "internal/jemalloc_mutex.h"
#include "internal/jemalloc_extent.h" #include "internal/jemalloc_extent.h"
#include "internal/jemalloc_arena.h" #include "internal/jemalloc_arena.h"
@ -220,6 +222,7 @@ extern size_t lg_pagesize;
/* Number of CPUs. */ /* Number of CPUs. */
extern unsigned ncpus; extern unsigned ncpus;
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
#ifndef NO_TLS #ifndef NO_TLS
/* /*
* Map of pthread_self() --> arenas[???], used for selecting an arena to use * Map of pthread_self() --> arenas[???], used for selecting an arena to use
@ -240,6 +243,7 @@ arena_t *choose_arena_hard(void);
#endif #endif
#include "internal/jemalloc_stats.h" #include "internal/jemalloc_stats.h"
#include "internal/jemalloc_ctl.h"
#include "internal/jemalloc_mutex.h" #include "internal/jemalloc_mutex.h"
#include "internal/jemalloc_extent.h" #include "internal/jemalloc_extent.h"
#include "internal/jemalloc_arena.h" #include "internal/jemalloc_arena.h"
@ -254,6 +258,7 @@ arena_t *choose_arena_hard(void);
#define JEMALLOC_H_INLINES #define JEMALLOC_H_INLINES
#include "internal/jemalloc_stats.h" #include "internal/jemalloc_stats.h"
#include "internal/jemalloc_ctl.h"
#include "internal/jemalloc_mutex.h" #include "internal/jemalloc_mutex.h"
#include "internal/jemalloc_extent.h" #include "internal/jemalloc_extent.h"
#include "internal/jemalloc_base.h" #include "internal/jemalloc_base.h"

View File

@ -21,7 +21,7 @@ void trace_free(const void *ptr, size_t size);
void trace_malloc_usable_size(size_t size, const void *ptr); void trace_malloc_usable_size(size_t size, const void *ptr);
void trace_thread_exit(void); void trace_thread_exit(void);
void trace_boot(void); bool trace_boot(void);
#endif /* JEMALLOC_H_EXTERNS */ #endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/ /******************************************************************************/

View File

@ -95,12 +95,12 @@
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
malloc_mutex_t arenas_lock;
arena_t **arenas; arena_t **arenas;
unsigned narenas; unsigned narenas;
#ifndef NO_TLS #ifndef NO_TLS
static unsigned next_arena; static unsigned next_arena;
#endif #endif
static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
#ifndef NO_TLS #ifndef NO_TLS
__thread arena_t *arenas_map JEMALLOC_ATTR(tls_model("initial-exec")); __thread arena_t *arenas_map JEMALLOC_ATTR(tls_model("initial-exec"));
@ -710,9 +710,18 @@ MALLOC_OUT:
} }
} }
if (ctl_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
#ifdef JEMALLOC_TRACE #ifdef JEMALLOC_TRACE
if (opt_trace) if (opt_trace) {
trace_boot(); if (trace_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
}
#endif #endif
if (opt_stats_print) { if (opt_stats_print) {
/* Print statistics at exit. */ /* Print statistics at exit. */
@ -722,6 +731,11 @@ MALLOC_OUT:
/* Register fork handlers. */ /* Register fork handlers. */
pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork); pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
if (base_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
if (arena_boot0()) { if (arena_boot0()) {
malloc_mutex_unlock(&init_lock); malloc_mutex_unlock(&init_lock);
return (true); return (true);
@ -742,11 +756,6 @@ MALLOC_OUT:
return (true); return (true);
} }
if (huge_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
/* /*
* Create enough scaffolding to allow recursive allocation in * Create enough scaffolding to allow recursive allocation in
* malloc_ncpus(). * malloc_ncpus().
@ -1222,22 +1231,6 @@ JEMALLOC_P(malloc_swap_enable)(const int *fds, unsigned nfds, int prezeroed)
} }
#endif #endif
#ifdef JEMALLOC_TCACHE
JEMALLOC_ATTR(visibility("default"))
void
JEMALLOC_P(malloc_tcache_flush)(void)
{
tcache_t *tcache;
tcache = tcache_tls;
if (tcache == NULL)
return;
tcache_destroy(tcache);
tcache_tls = NULL;
}
#endif
JEMALLOC_ATTR(visibility("default")) JEMALLOC_ATTR(visibility("default"))
void void
JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *, JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *,
@ -1247,6 +1240,32 @@ JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *,
stats_print(write4, w4opaque, opts); stats_print(write4, w4opaque, opts);
} }
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
{
return (ctl_nametomib(name, mibp, miblenp));
}
JEMALLOC_ATTR(visibility("default"))
int
JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
/* /*
* End non-standard functions. * End non-standard functions.
*/ */

View File

@ -21,16 +21,15 @@ void *JEMALLOC_P(realloc)(void *ptr, size_t size);
void JEMALLOC_P(free)(void *ptr); void JEMALLOC_P(free)(void *ptr);
size_t JEMALLOC_P(malloc_usable_size)(const void *ptr); size_t JEMALLOC_P(malloc_usable_size)(const void *ptr);
#ifdef JEMALLOC_SWAP
int JEMALLOC_P(malloc_swap_enable)(const int *fds, unsigned nfds,
int prezeroed);
#endif
#ifdef JEMALLOC_TCACHE
void JEMALLOC_P(malloc_tcache_flush)(void);
#endif
void JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *, void JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *,
const char *, const char *, const char *), void *w4opaque, const char *, const char *, const char *), void *w4opaque,
const char *opts); const char *opts);
int JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp,
size_t *miblenp);
int JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
#ifdef __cplusplus #ifdef __cplusplus
}; };

View File

@ -172,19 +172,6 @@ static void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
static bool arena_is_large(const void *ptr); static bool arena_is_large(const void *ptr);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin); arena_run_t *run, arena_bin_t *bin);
#ifdef JEMALLOC_STATS
static void arena_stats_aprint(size_t nactive, size_t ndirty,
const arena_stats_t *astats,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque);
static void arena_stats_bprint(arena_t *arena,
const malloc_bin_stats_t *bstats,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque);
static void arena_stats_lprint(const malloc_large_stats_t *lstats,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque);
#endif
static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t size, size_t oldsize); void *ptr, size_t size, size_t oldsize);
static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
@ -1570,7 +1557,7 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats) malloc_large_stats_t *lstats)
{ {
unsigned i, nlclasses; unsigned i;
*nactive += arena->nactive; *nactive += arena->nactive;
*ndirty += arena->ndirty; *ndirty += arena->ndirty;
@ -1601,193 +1588,12 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
bstats[i].curruns += arena->bins[i].stats.curruns; bstats[i].curruns += arena->bins[i].stats.curruns;
} }
for (i = 0, nlclasses = (chunksize - PAGE_SIZE) >> PAGE_SHIFT; for (i = 0; i < nlclasses; i++) {
i < nlclasses;
i++) {
lstats[i].nrequests += arena->stats.lstats[i].nrequests; lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].highruns += arena->stats.lstats[i].highruns; lstats[i].highruns += arena->stats.lstats[i].highruns;
lstats[i].curruns += arena->stats.lstats[i].curruns; lstats[i].curruns += arena->stats.lstats[i].curruns;
} }
} }
static void
arena_stats_aprint(size_t nactive, size_t ndirty, const arena_stats_t *astats,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque)
{
malloc_cprintf(write4, w4opaque,
"dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
" %"PRIu64" madvise%s, %"PRIu64" purged\n",
nactive, ndirty,
astats->npurge, astats->npurge == 1 ? "" : "s",
astats->nmadvise, astats->nmadvise == 1 ? "" : "s",
astats->purged);
malloc_cprintf(write4, w4opaque,
" allocated nmalloc ndalloc\n");
malloc_cprintf(write4, w4opaque,
"small: %12zu %12"PRIu64" %12"PRIu64"\n",
astats->allocated_small, astats->nmalloc_small,
astats->ndalloc_small);
malloc_cprintf(write4, w4opaque,
"medium: %12zu %12"PRIu64" %12"PRIu64"\n",
astats->allocated_medium, astats->nmalloc_medium,
astats->ndalloc_medium);
malloc_cprintf(write4, w4opaque,
"large: %12zu %12"PRIu64" %12"PRIu64"\n",
astats->allocated_large, astats->nmalloc_large,
astats->ndalloc_large);
malloc_cprintf(write4, w4opaque,
"total: %12zu %12"PRIu64" %12"PRIu64"\n",
astats->allocated_small + astats->allocated_medium +
astats->allocated_large, astats->nmalloc_small +
astats->nmalloc_medium + astats->nmalloc_large,
astats->ndalloc_small + astats->ndalloc_medium +
astats->ndalloc_large);
malloc_cprintf(write4, w4opaque, "mapped: %12zu\n", astats->mapped);
}
static void
arena_stats_bprint(arena_t *arena, const malloc_bin_stats_t *bstats,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque)
{
unsigned i, gap_start;
#ifdef JEMALLOC_TCACHE
malloc_cprintf(write4, w4opaque,
"bins: bin size regs pgs requests "
"nfills nflushes newruns reruns maxruns curruns\n");
#else
malloc_cprintf(write4, w4opaque,
"bins: bin size regs pgs requests "
"newruns reruns maxruns curruns\n");
#endif
for (i = 0, gap_start = UINT_MAX; i < nbins; i++) {
if (bstats[i].nruns == 0) {
if (gap_start == UINT_MAX)
gap_start = i;
} else {
if (gap_start != UINT_MAX) {
if (i > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write4, w4opaque,
"[%u..%u]\n", gap_start,
i - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write4, w4opaque,
"[%u]\n", gap_start);
}
gap_start = UINT_MAX;
}
malloc_cprintf(write4, w4opaque,
"%13u %1s %5u %4u %3u %9"PRIu64" %9"PRIu64
#ifdef JEMALLOC_TCACHE
" %9"PRIu64" %9"PRIu64""
#endif
" %9"PRIu64" %7zu %7zu\n",
i,
i < ntbins ? "T" : i < ntbins + nqbins ?
"Q" : i < ntbins + nqbins + ncbins ? "C" :
i < ntbins + nqbins + ncbins + nsbins ? "S"
: "M",
arena->bins[i].reg_size,
arena->bins[i].nregs,
arena->bins[i].run_size >> PAGE_SHIFT,
bstats[i].nrequests,
#ifdef JEMALLOC_TCACHE
bstats[i].nfills,
bstats[i].nflushes,
#endif
bstats[i].nruns,
bstats[i].reruns,
bstats[i].highruns,
bstats[i].curruns);
}
}
if (gap_start != UINT_MAX) {
if (i > gap_start + 1) {
/* Gap of more than one size class. */
malloc_cprintf(write4, w4opaque, "[%u..%u]\n",
gap_start, i - 1);
} else {
/* Gap of one size class. */
malloc_cprintf(write4, w4opaque, "[%u]\n", gap_start);
}
}
}
static void
arena_stats_lprint(const malloc_large_stats_t *lstats,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque)
{
size_t i;
ssize_t gap_start;
size_t nlclasses = (chunksize - PAGE_SIZE) >> PAGE_SHIFT;
malloc_cprintf(write4, w4opaque,
"large: size pages nrequests maxruns curruns\n");
for (i = 0, gap_start = -1; i < nlclasses; i++) {
if (lstats[i].nrequests == 0) {
if (gap_start == -1)
gap_start = i;
} else {
if (gap_start != -1) {
malloc_cprintf(write4, w4opaque, "[%zu]\n",
i - gap_start);
gap_start = -1;
}
malloc_cprintf(write4, w4opaque,
"%13zu %5zu %9"PRIu64" %9zu %9zu\n",
(i+1) << PAGE_SHIFT, i+1,
lstats[i].nrequests,
lstats[i].highruns,
lstats[i].curruns);
}
}
if (gap_start != -1)
malloc_cprintf(write4, w4opaque, "[%zu]\n", i - gap_start);
}
void
arena_stats_mprint(arena_t *arena, size_t nactive, size_t ndirty,
const arena_stats_t *astats, const malloc_bin_stats_t *bstats,
const malloc_large_stats_t *lstats, bool bins, bool large,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque)
{
arena_stats_aprint(nactive, ndirty, astats, write4, w4opaque);
if (bins && astats->nmalloc_small + astats->nmalloc_medium > 0)
arena_stats_bprint(arena, bstats, write4, w4opaque);
if (large && astats->nmalloc_large > 0)
arena_stats_lprint(lstats, write4, w4opaque);
}
void
arena_stats_print(arena_t *arena, bool bins, bool large,
void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque)
{
size_t nactive, ndirty;
arena_stats_t astats;
malloc_bin_stats_t bstats[nbins];
malloc_large_stats_t lstats[((chunksize - PAGE_SIZE) >> PAGE_SHIFT)];
nactive = 0;
ndirty = 0;
memset(&astats, 0, sizeof(astats));
memset(bstats, 0, sizeof(bstats));
memset(lstats, 0, sizeof(lstats));
arena_stats_merge(arena, &nactive, &ndirty, &astats, bstats, lstats);
arena_stats_mprint(arena, nactive, ndirty, &astats, bstats, lstats,
bins, large, write4, w4opaque);
}
#endif #endif
void void

View File

@ -10,6 +10,7 @@ bool opt_overcommit = true;
#endif #endif
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks; chunk_stats_t stats_chunks;
#endif #endif
@ -64,11 +65,13 @@ chunk_alloc(size_t size, bool *zero)
RETURN: RETURN:
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
if (ret != NULL) { if (ret != NULL) {
malloc_mutex_lock(&chunks_mtx);
stats_chunks.nchunks += (size / chunksize); stats_chunks.nchunks += (size / chunksize);
stats_chunks.curchunks += (size / chunksize); stats_chunks.curchunks += (size / chunksize);
}
if (stats_chunks.curchunks > stats_chunks.highchunks) if (stats_chunks.curchunks > stats_chunks.highchunks)
stats_chunks.highchunks = stats_chunks.curchunks; stats_chunks.highchunks = stats_chunks.curchunks;
malloc_mutex_unlock(&chunks_mtx);
}
#endif #endif
assert(CHUNK_ADDR2BASE(ret) == ret); assert(CHUNK_ADDR2BASE(ret) == ret);
@ -85,7 +88,9 @@ chunk_dealloc(void *chunk, size_t size)
assert((size & chunksize_mask) == 0); assert((size & chunksize_mask) == 0);
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
malloc_mutex_lock(&chunks_mtx);
stats_chunks.curchunks -= (size / chunksize); stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
#endif #endif
#ifdef JEMALLOC_SWAP #ifdef JEMALLOC_SWAP
@ -110,6 +115,8 @@ chunk_boot(void)
chunk_npages = (chunksize >> PAGE_SHIFT); chunk_npages = (chunksize >> PAGE_SHIFT);
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
if (malloc_mutex_init(&chunks_mtx))
return (true);
memset(&stats_chunks, 0, sizeof(chunk_stats_t)); memset(&stats_chunks, 0, sizeof(chunk_stats_t));
#endif #endif

View File

@ -243,7 +243,7 @@ chunk_dealloc_dss(void *chunk, size_t size)
goto RETURN; goto RETURN;
} }
ret = true ret = true;
RETURN: RETURN:
malloc_mutex_unlock(&dss_mtx); malloc_mutex_unlock(&dss_mtx);
return (ret); return (ret);

View File

@ -6,12 +6,13 @@
malloc_mutex_t swap_mtx; malloc_mutex_t swap_mtx;
bool swap_enabled; bool swap_enabled;
bool swap_prezeroed;
size_t swap_nfds;
int *swap_fds;
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
size_t swap_avail; size_t swap_avail;
#endif #endif
static bool swap_prezeroed;
/* Base address of the mmap()ed file(s). */ /* Base address of the mmap()ed file(s). */
static void *swap_base; static void *swap_base;
/* Current end of the space in use (<= swap_max). */ /* Current end of the space in use (<= swap_max). */
@ -318,12 +319,21 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
swap_end = swap_base; swap_end = swap_base;
swap_max = (void *)((uintptr_t)vaddr + cumsize); swap_max = (void *)((uintptr_t)vaddr + cumsize);
swap_enabled = true; /* Copy the fds array for mallctl purposes. */
swap_fds = (int *)base_alloc(nfds * sizeof(int));
if (swap_fds == NULL) {
ret = true;
goto RETURN;
}
memcpy(swap_fds, fds, nfds * sizeof(int));
swap_nfds = nfds;
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
swap_avail = cumsize; swap_avail = cumsize;
#endif #endif
swap_enabled = true;
ret = false; ret = false;
RETURN: RETURN:
malloc_mutex_unlock(&swap_mtx); malloc_mutex_unlock(&swap_mtx);
@ -338,10 +348,12 @@ chunk_swap_boot(void)
return (true); return (true);
swap_enabled = false; swap_enabled = false;
swap_prezeroed = false; /* swap.* mallctl's depend on this. */
swap_nfds = 0;
swap_fds = NULL;
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
swap_avail = 0; swap_avail = 0;
#endif #endif
swap_prezeroed = false;
swap_base = NULL; swap_base = NULL;
swap_end = NULL; swap_end = NULL;
swap_max = NULL; swap_max = NULL;

1291
jemalloc/src/jemalloc_ctl.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,39 @@
#define JEMALLOC_STATS_C_ #define JEMALLOC_STATS_C_
#include "internal/jemalloc_internal.h" #include "internal/jemalloc_internal.h"
/*
 * Helper macros for reading values through the mallctl interface during
 * stats printing.  CTL_GET reads a value by name.  The _I/_J/_IJ variants
 * translate the name to a MIB once, then patch the arena index into
 * mib[2] and/or the size-class index into mib[4].  NOTE: `i` and `j` are
 * deliberately captured from the enclosing function's scope.
 */
#define CTL_GET(n, v, t) do {						\
	size_t sz = sizeof(t);						\
	xmallctl(n, v, &sz, NULL, 0);					\
} while (0)

#define CTL_I_GET(n, v, t) do {						\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = i;							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)

#define CTL_J_GET(n, v, t) do {						\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = j;							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)

#define CTL_IJ_GET(n, v, t) do {					\
	size_t mib[6];							\
	size_t miblen = sizeof(mib) / sizeof(size_t);			\
	size_t sz = sizeof(t);						\
	xmallctlnametomib(n, mib, &miblen);				\
	mib[2] = i;							\
	mib[4] = j;							\
	xmallctlbymib(mib, miblen, v, &sz, NULL, 0);			\
} while (0)
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
@ -9,9 +42,17 @@ bool opt_stats_print = false;
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
static void #ifdef JEMALLOC_STATS
malloc_vcprintf(void (*write4)(void *, const char *, const char *, const char *, static void malloc_vcprintf(void (*write4)(void *, const char *,
const char *), void *w4opaque, const char *format, va_list ap); const char *, const char *, const char *), void *w4opaque,
const char *format, va_list ap);
static void stats_arena_bins_print(void (*write4)(void *, const char *,
const char *, const char *, const char *), void *w4opaque, unsigned i);
static void stats_arena_lruns_print(void (*write4)(void *, const char *,
const char *, const char *, const char *), void *w4opaque, unsigned i);
static void stats_arena_print(void (*write4)(void *, const char *,
const char *, const char *, const char *), void *w4opaque, unsigned i);
#endif
/******************************************************************************/ /******************************************************************************/
@ -106,10 +147,221 @@ malloc_printf(const char *format, ...)
} }
#endif #endif
#ifdef JEMALLOC_STATS
/*
 * Print per-bin (small/medium size class) statistics for arena <i>, fetched
 * entirely through the mallctl MIB interface.  Consecutive bins with no
 * completed runs are collapsed into "[start..end]" gap lines rather than
 * printed individually.
 */
static void
stats_arena_bins_print(void (*write4)(void *, const char *, const char *,
    const char *, const char *), void *w4opaque, unsigned i)
{
	size_t pagesize;
	bool config_tcache;
	unsigned nbins, j, gap_start;

	CTL_GET("arenas.pagesize", &pagesize, size_t);

	/* The fill/flush columns exist only when tcache support is compiled in. */
	CTL_GET("config.tcache", &config_tcache, bool);
	if (config_tcache) {
		malloc_cprintf(write4, w4opaque,
		    "bins: bin size regs pgs nrequests "
		    "nfills nflushes newruns reruns maxruns curruns\n");
	} else {
		malloc_cprintf(write4, w4opaque,
		    "bins: bin size regs pgs nrequests "
		    "newruns reruns maxruns curruns\n");
	}
	CTL_GET("arenas.nbins", &nbins, unsigned);
	for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
		uint64_t nruns;

		CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
		if (nruns == 0) {
			/* Quiescent bin; start or extend a gap. */
			if (gap_start == UINT_MAX)
				gap_start = j;
		} else {
			unsigned ntbins_, nqbins, ncbins, nsbins;
			size_t reg_size, run_size;
			uint32_t nregs;
			uint64_t nrequests, nfills, nflushes, reruns;
			size_t highruns, curruns;

			if (gap_start != UINT_MAX) {
				if (j > gap_start + 1) {
					/* Gap of more than one size class. */
					malloc_cprintf(write4, w4opaque,
					    "[%u..%u]\n", gap_start,
					    j - 1);
				} else {
					/* Gap of one size class. */
					malloc_cprintf(write4, w4opaque,
					    "[%u]\n", gap_start);
				}
				gap_start = UINT_MAX;
			}
			CTL_GET("arenas.ntbins", &ntbins_, unsigned);
			CTL_GET("arenas.nqbins", &nqbins, unsigned);
			CTL_GET("arenas.ncbins", &ncbins, unsigned);
			CTL_GET("arenas.nsbins", &nsbins, unsigned);
			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
			    &nrequests, uint64_t);
			if (config_tcache) {
				CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
				    &nfills, uint64_t);
				CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
				    &nflushes, uint64_t);
			}
			CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
			    uint64_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
			    size_t);
			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
			    size_t);
			/*
			 * reg_size and run_size/pagesize are size_t; they must
			 * be printed with %zu rather than %u -- a mismatched
			 * conversion specifier is undefined behavior and
			 * produces garbage on LP64 platforms.
			 */
			if (config_tcache) {
				malloc_cprintf(write4, w4opaque,
				    "%13u %1s %5zu %4u %3zu %10"PRIu64
				    " %9"PRIu64" %9"PRIu64" %9"PRIu64""
				    " %9"PRIu64" %7zu %7zu\n",
				    j,
				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
				    j < ntbins_ + nqbins + ncbins + nsbins ? "S"
				    : "M",
				    reg_size, nregs, run_size / pagesize,
				    nrequests, nfills, nflushes, nruns, reruns,
				    highruns, curruns);
			} else {
				malloc_cprintf(write4, w4opaque,
				    "%13u %1s %5zu %4u %3zu %10"PRIu64
				    " %9"PRIu64" %9"PRIu64" %7zu %7zu\n",
				    j,
				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
				    j < ntbins_ + nqbins + ncbins + nsbins ? "S"
				    : "M",
				    reg_size, nregs, run_size / pagesize,
				    nrequests, nruns, reruns, highruns,
				    curruns);
			}
		}
	}
	if (gap_start != UINT_MAX) {
		if (j > gap_start + 1) {
			/* Gap of more than one size class. */
			malloc_cprintf(write4, w4opaque, "[%u..%u]\n",
			    gap_start, j - 1);
		} else {
			/* Gap of one size class. */
			malloc_cprintf(write4, w4opaque, "[%u]\n", gap_start);
		}
	}
}
/*
 * Print large-run statistics for arena <i>.  Runs of size classes with no
 * requests are collapsed into a single "[count]" gap line.
 */
static void
stats_arena_lruns_print(void (*write4)(void *, const char *, const char *,
    const char *, const char *), void *w4opaque, unsigned i)
{
	size_t pagesize, j;
	unsigned nlruns;
	ssize_t gap_start;

	CTL_GET("arenas.pagesize", &pagesize, size_t);

	malloc_cprintf(write4, w4opaque,
	    "large: size pages nrequests maxruns curruns\n");
	/*
	 * "arenas.nlruns" reports an unsigned value.  Reading it into a
	 * size_t (as the code previously did) only wrote sizeof(unsigned)
	 * bytes, leaving the upper half of the loop bound uninitialized on
	 * LP64 platforms; fetch it with a matching type instead.
	 */
	CTL_GET("arenas.nlruns", &nlruns, unsigned);
	for (j = 0, gap_start = -1; j < nlruns; j++) {
		uint64_t nrequests;
		size_t run_size, highruns, curruns;

		CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
		    uint64_t);
		if (nrequests == 0) {
			/* Quiescent size class; start or extend a gap. */
			if (gap_start == -1)
				gap_start = j;
		} else {
			CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
			CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
			    size_t);
			CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
			    size_t);
			if (gap_start != -1) {
				malloc_cprintf(write4, w4opaque, "[%zu]\n",
				    j - gap_start);
				gap_start = -1;
			}
			malloc_cprintf(write4, w4opaque,
			    "%13zu %5zu %9"PRIu64" %9zu %9zu\n",
			    run_size, run_size / pagesize, nrequests, highruns,
			    curruns);
		}
	}
	if (gap_start != -1)
		malloc_cprintf(write4, w4opaque, "[%zu]\n", j - gap_start);
}
/*
 * Print the top-level statistics for arena <i>: dirty-page purging
 * activity, per-category (small/medium/large) allocation counters and
 * their combined totals, the mapped byte count, and finally the per-bin
 * and per-large-run detail tables.
 */
static void
stats_arena_print(void (*write4)(void *, const char *, const char *,
    const char *, const char *), void *w4opaque, unsigned i)
{
	size_t pages_active, pages_dirty, nmapped;
	uint64_t nsweeps, nmadvise_calls, npurged;
	size_t sm_alloc, md_alloc, lg_alloc;
	uint64_t sm_nmalloc, sm_ndalloc;
	uint64_t md_nmalloc, md_ndalloc;
	uint64_t lg_nmalloc, lg_ndalloc;

	/* Dirty page purging activity. */
	CTL_I_GET("stats.arenas.0.pactive", &pages_active, size_t);
	CTL_I_GET("stats.arenas.0.pdirty", &pages_dirty, size_t);
	CTL_I_GET("stats.arenas.0.npurge", &nsweeps, uint64_t);
	CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise_calls, uint64_t);
	CTL_I_GET("stats.arenas.0.purged", &npurged, uint64_t);
	malloc_cprintf(write4, w4opaque,
	    "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
	    " %"PRIu64" madvise%s, %"PRIu64" purged\n",
	    pages_active, pages_dirty, nsweeps, nsweeps == 1 ? "" : "s",
	    nmadvise_calls, nmadvise_calls == 1 ? "" : "s", npurged);

	/* Per-category allocation counters. */
	malloc_cprintf(write4, w4opaque,
	    " allocated nmalloc ndalloc\n");
	CTL_I_GET("stats.arenas.0.small.allocated", &sm_alloc, size_t);
	CTL_I_GET("stats.arenas.0.small.nmalloc", &sm_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.small.ndalloc", &sm_ndalloc, uint64_t);
	malloc_cprintf(write4, w4opaque,
	    "small: %12zu %12"PRIu64" %12"PRIu64"\n",
	    sm_alloc, sm_nmalloc, sm_ndalloc);
	CTL_I_GET("stats.arenas.0.medium.allocated", &md_alloc, size_t);
	CTL_I_GET("stats.arenas.0.medium.nmalloc", &md_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.medium.ndalloc", &md_ndalloc, uint64_t);
	malloc_cprintf(write4, w4opaque,
	    "medium: %12zu %12"PRIu64" %12"PRIu64"\n",
	    md_alloc, md_nmalloc, md_ndalloc);
	CTL_I_GET("stats.arenas.0.large.allocated", &lg_alloc, size_t);
	CTL_I_GET("stats.arenas.0.large.nmalloc", &lg_nmalloc, uint64_t);
	CTL_I_GET("stats.arenas.0.large.ndalloc", &lg_ndalloc, uint64_t);
	malloc_cprintf(write4, w4opaque,
	    "large: %12zu %12"PRIu64" %12"PRIu64"\n",
	    lg_alloc, lg_nmalloc, lg_ndalloc);

	/* Totals across all three categories. */
	malloc_cprintf(write4, w4opaque,
	    "total: %12zu %12"PRIu64" %12"PRIu64"\n",
	    sm_alloc + md_alloc + lg_alloc,
	    sm_nmalloc + md_nmalloc + lg_nmalloc,
	    sm_ndalloc + md_ndalloc + lg_ndalloc);
	CTL_I_GET("stats.arenas.0.mapped", &nmapped, size_t);
	malloc_cprintf(write4, w4opaque, "mapped: %12zu\n", nmapped);

	/* Detailed per-size-class tables. */
	stats_arena_bins_print(write4, w4opaque, i);
	stats_arena_lruns_print(write4, w4opaque, i);
}
#endif
void void
stats_print(void (*write4)(void *, const char *, const char *, const char *, stats_print(void (*write4)(void *, const char *, const char *, const char *,
const char *), void *w4opaque, const char *opts) const char *), void *w4opaque, const char *opts)
{ {
uint64_t epoch;
size_t u64sz;
char s[UMAX2S_BUFSIZE]; char s[UMAX2S_BUFSIZE];
bool general = true; bool general = true;
bool merged = true; bool merged = true;
@ -117,6 +369,11 @@ stats_print(void (*write4)(void *, const char *, const char *, const char *,
bool bins = true; bool bins = true;
bool large = true; bool large = true;
/* Refresh stats, in case mallctl() was called by the application. */
epoch = 1;
u64sz = sizeof(uint64_t);
xmallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (write4 == NULL) { if (write4 == NULL) {
/* /*
* The caller did not provide an alternate write4 callback * The caller did not provide an alternate write4 callback
@ -154,165 +411,175 @@ stats_print(void (*write4)(void *, const char *, const char *, const char *,
write4(w4opaque, "___ Begin jemalloc statistics ___\n", "", "", ""); write4(w4opaque, "___ Begin jemalloc statistics ___\n", "", "", "");
if (general) { if (general) {
write4(w4opaque, "Assertions ", int err;
#ifdef NDEBUG bool bv;
"disabled", unsigned uv;
#else ssize_t ssv;
"enabled", size_t sv, bsz, usz, ssz, sssz;
#endif
bsz = sizeof(bool);
usz = sizeof(unsigned);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
CTL_GET("config.debug", &bv, bool);
write4(w4opaque, "Assertions ", bv ? "enabled" : "disabled",
"\n", ""); "\n", "");
write4(w4opaque, "Boolean JEMALLOC_OPTIONS: ",
opt_abort ? "A" : "a", "", ""); write4(w4opaque, "Boolean JEMALLOC_OPTIONS: ", "", "", "");
#ifdef JEMALLOC_FILL if ((err = mallctl("opt.abort", &bv, &bsz, NULL, 0)) == 0)
write4(w4opaque, opt_junk ? "J" : "j", "", "", ""); write4(w4opaque, bv ? "A" : "a", "", "", "");
#endif if ((err = mallctl("opt.junk", &bv, &bsz, NULL, 0)) == 0)
#ifdef JEMALLOC_SWAP write4(w4opaque, bv ? "J" : "j", "", "", "");
write4(w4opaque, opt_overcommit ? "O" : "o", "", "", ""); if ((err = mallctl("opt.overcommit", &bv, &bsz, NULL, 0)) == 0)
#endif write4(w4opaque, bv ? "O" : "o", "", "", "");
write4(w4opaque, "P", "", "", ""); write4(w4opaque, "P", "", "", "");
#ifdef JEMALLOC_TCACHE if ((err = mallctl("opt.tcache_sort", &bv, &bsz, NULL, 0)) == 0)
write4(w4opaque, opt_tcache_sort ? "S" : "s", "", "", ""); write4(w4opaque, bv ? "S" : "s", "", "", "");
#endif if ((err = mallctl("opt.trace", &bv, &bsz, NULL, 0)) == 0)
#ifdef JEMALLOC_TRACE write4(w4opaque, bv ? "T" : "t", "", "", "");
write4(w4opaque, opt_trace ? "T" : "t", "", "", ""); if ((err = mallctl("opt.sysv", &bv, &bsz, NULL, 0)) == 0)
#endif write4(w4opaque, bv ? "V" : "v", "", "", "");
#ifdef JEMALLOC_SYSV if ((err = mallctl("opt.xmalloc", &bv, &bsz, NULL, 0)) == 0)
write4(w4opaque, opt_sysv ? "V" : "v", "", "", ""); write4(w4opaque, bv ? "X" : "x", "", "", "");
#endif if ((err = mallctl("opt.zero", &bv, &bsz, NULL, 0)) == 0)
#ifdef JEMALLOC_XMALLOC write4(w4opaque, bv ? "Z" : "z", "", "", "");
write4(w4opaque, opt_xmalloc ? "X" : "x", "", "", "");
#endif
#ifdef JEMALLOC_FILL
write4(w4opaque, opt_zero ? "Z" : "z", "", "", "");
#endif
write4(w4opaque, "\n", "", "", ""); write4(w4opaque, "\n", "", "", "");
write4(w4opaque, "CPUs: ", umax2s(ncpus, 10, s), "\n", ""); write4(w4opaque, "CPUs: ", umax2s(ncpus, 10, s), "\n", "");
write4(w4opaque, "Max arenas: ", umax2s(narenas, 10, s), "\n",
""); CTL_GET("arenas.narenas", &uv, unsigned);
write4(w4opaque, "Max arenas: ", umax2s(uv, 10, s), "\n", "");
write4(w4opaque, "Pointer size: ", umax2s(sizeof(void *), 10, write4(w4opaque, "Pointer size: ", umax2s(sizeof(void *), 10,
s), "\n", ""); s), "\n", "");
write4(w4opaque, "Quantum size: ", umax2s(QUANTUM, 10, s), "\n",
CTL_GET("arenas.quantum", &sv, size_t);
write4(w4opaque, "Quantum size: ", umax2s(sv, 10, s), "\n", "");
CTL_GET("arenas.cacheline", &sv, size_t);
write4(w4opaque, "Cacheline size (assumed): ", umax2s(sv, 10,
s), "\n", "");
CTL_GET("arenas.subpage", &sv, size_t);
write4(w4opaque, "Subpage spacing: ", umax2s(sv, 10, s), "\n",
""); "");
write4(w4opaque, "Cacheline size (assumed): ", umax2s(CACHELINE,
10, s), CTL_GET("arenas.medium", &sv, size_t);
"\n", ""); write4(w4opaque, "Medium spacing: ", umax2s(sv, 10, s), "\n",
write4(w4opaque, "Subpage spacing: ", umax2s(SUBPAGE, 10, s),
"\n", "");
write4(w4opaque, "Medium spacing: ", umax2s((1U << lg_mspace),
10, s), "\n", "");
#ifdef JEMALLOC_TINY
write4(w4opaque, "Tiny 2^n-spaced sizes: [", umax2s((1U <<
LG_TINY_MIN), 10, s), "..", "");
write4(w4opaque, umax2s((qspace_min >> 1), 10, s), "]\n", "",
""); "");
#endif
write4(w4opaque, "Quantum-spaced sizes: [", umax2s(qspace_min, if ((err = mallctl("arenas.tspace_min", &sv, &ssz, NULL, 0)) ==
0) {
write4(w4opaque, "Tiny 2^n-spaced sizes: [", umax2s(sv,
10, s), "..", ""); 10, s), "..", "");
write4(w4opaque, umax2s(qspace_max, 10, s), "]\n", "", "");
write4(w4opaque, "Cacheline-spaced sizes: [", umax2s(cspace_min, CTL_GET("arenas.tspace_max", &sv, size_t);
10, s), "..", ""); write4(w4opaque, umax2s(sv, 10, s), "]\n", "", "");
write4(w4opaque, umax2s(cspace_max, 10, s), "]\n", "", ""); }
write4(w4opaque, "Subpage-spaced sizes: [", umax2s(sspace_min,
10, s), "..", ""); CTL_GET("arenas.qspace_min", &sv, size_t);
write4(w4opaque, umax2s(sspace_max, 10, s), "]\n", "", ""); write4(w4opaque, "Quantum-spaced sizes: [", umax2s(sv, 10, s),
write4(w4opaque, "Medium sizes: [", umax2s(medium_min, 10, s),
"..", ""); "..", "");
write4(w4opaque, umax2s(medium_max, 10, s), "]\n", "", ""); CTL_GET("arenas.qspace_max", &sv, size_t);
if (opt_lg_dirty_mult >= 0) { write4(w4opaque, umax2s(sv, 10, s), "]\n", "", "");
CTL_GET("arenas.cspace_min", &sv, size_t);
write4(w4opaque, "Cacheline-spaced sizes: [", umax2s(sv, 10, s),
"..", "");
CTL_GET("arenas.cspace_max", &sv, size_t);
write4(w4opaque, umax2s(sv, 10, s), "]\n", "", "");
CTL_GET("arenas.sspace_min", &sv, size_t);
write4(w4opaque, "Subpage-spaced sizes: [", umax2s(sv, 10, s),
"..", "");
CTL_GET("arenas.sspace_max", &sv, size_t);
write4(w4opaque, umax2s(sv, 10, s), "]\n", "", "");
CTL_GET("arenas.medium_min", &sv, size_t);
write4(w4opaque, "Medium sizes: [", umax2s(sv, 10, s), "..",
"");
CTL_GET("arenas.medium_max", &sv, size_t);
write4(w4opaque, umax2s(sv, 10, s), "]\n", "", "");
CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
write4(w4opaque, write4(w4opaque,
"Min active:dirty page ratio per arena: ", "Min active:dirty page ratio per arena: ",
umax2s((1U << opt_lg_dirty_mult), 10, s), ":1\n", umax2s((1U << ssv), 10, s), ":1\n", "");
"");
} else { } else {
write4(w4opaque, write4(w4opaque,
"Min active:dirty page ratio per arena: N/A\n", "", "Min active:dirty page ratio per arena: N/A\n", "",
"", ""); "", "");
} }
#ifdef JEMALLOC_TCACHE #ifdef JEMALLOC_TCACHE
if ((err = mallctl("opt.lg_tcache_nslots", &sv, &ssz, NULL, 0))
== 0) {
size_t tcache_nslots, tcache_gc_sweep;
tcache_nslots = (1U << sv);
write4(w4opaque, "Thread cache slots per size class: ", write4(w4opaque, "Thread cache slots per size class: ",
tcache_nslots ? umax2s(tcache_nslots, 10, s) : "N/A", "\n", tcache_nslots ? umax2s(tcache_nslots, 10, s) :
""); "N/A", "\n", "");
CTL_GET("opt.lg_tcache_gc_sweep", &ssv, ssize_t);
tcache_gc_sweep = (1U << ssv);
write4(w4opaque, "Thread cache GC sweep interval: ", write4(w4opaque, "Thread cache GC sweep interval: ",
(tcache_nslots && tcache_gc_incr > 0) ? tcache_nslots && ssv >= 0 ? umax2s(tcache_gc_sweep,
umax2s((1U << opt_lg_tcache_gc_sweep), 10, s) : "N/A", 10, s) : "N/A", "\n", "");
"", ""); }
write4(w4opaque, " (increment interval: ",
(tcache_nslots && tcache_gc_incr > 0) ?
umax2s(tcache_gc_incr, 10, s) : "N/A",
")\n", "");
#endif #endif
write4(w4opaque, "Chunk size: ", umax2s(chunksize, 10, s), "", CTL_GET("arenas.chunksize", &sv, size_t);
""); write4(w4opaque, "Chunk size: ", umax2s(sv, 10, s), "", "");
write4(w4opaque, " (2^", umax2s(opt_lg_chunk, 10, s), ")\n", CTL_GET("opt.lg_chunk", &sv, size_t);
""); write4(w4opaque, " (2^", umax2s(sv, 10, s), ")\n", "");
} }
#ifdef JEMALLOC_STATS #ifdef JEMALLOC_STATS
{ {
int err;
size_t ssz, u64sz;
size_t allocated, mapped; size_t allocated, mapped;
unsigned i; size_t chunks_current, chunks_high, swap_avail;
arena_t *arena; uint64_t chunks_total;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc;
/* Calculate and print allocated/mapped stats. */ ssz = sizeof(size_t);
u64sz = sizeof(uint64_t);
/* arenas. */
for (i = 0, allocated = 0; i < narenas; i++) {
if (arenas[i] != NULL) {
malloc_mutex_lock(&arenas[i]->lock);
allocated += arenas[i]->stats.allocated_small;
allocated += arenas[i]->stats.allocated_large;
malloc_mutex_unlock(&arenas[i]->lock);
}
}
/* huge/base. */
malloc_mutex_lock(&huge_mtx);
allocated += huge_allocated;
mapped = stats_chunks.curchunks * chunksize;
malloc_mutex_unlock(&huge_mtx);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write4, w4opaque, malloc_cprintf(write4, w4opaque,
"Allocated: %zu, mapped: %zu\n", allocated, mapped); "Allocated: %zu, mapped: %zu\n", allocated, mapped);
/* Print chunk stats. */ /* Print chunk stats. */
{ CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
chunk_stats_t chunks_stats; CTL_GET("stats.chunks.high", &chunks_high, size_t);
#ifdef JEMALLOC_SWAP CTL_GET("stats.chunks.current", &chunks_current, size_t);
size_t swap_avail_chunks; if ((err = mallctl("swap.avail", &swap_avail, &ssz,
#endif NULL, 0)) == 0) {
size_t lg_chunk;
malloc_mutex_lock(&huge_mtx);
chunks_stats = stats_chunks;
malloc_mutex_unlock(&huge_mtx);
#ifdef JEMALLOC_SWAP
malloc_mutex_lock(&swap_mtx);
swap_avail_chunks = swap_avail >> opt_lg_chunk;
malloc_mutex_unlock(&swap_mtx);
#endif
malloc_cprintf(write4, w4opaque, "chunks: nchunks " malloc_cprintf(write4, w4opaque, "chunks: nchunks "
"highchunks curchunks" "highchunks curchunks swap_avail\n");
#ifdef JEMALLOC_SWAP CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
" swap_avail"
#endif
"\n");
malloc_cprintf(write4, w4opaque, malloc_cprintf(write4, w4opaque,
" %13"PRIu64"%13zu%13zu" " %13"PRIu64"%13zu%13zu%13zu\n",
#ifdef JEMALLOC_SWAP chunks_total, chunks_high, chunks_current,
"%13zu" swap_avail << lg_chunk);
#endif } else {
"\n", malloc_cprintf(write4, w4opaque, "chunks: nchunks "
chunks_stats.nchunks, chunks_stats.highchunks, "highchunks curchunks\n");
chunks_stats.curchunks malloc_cprintf(write4, w4opaque,
#ifdef JEMALLOC_SWAP " %13"PRIu64"%13zu%13zu\n",
, swap_avail_chunks chunks_total, chunks_high, chunks_current);
#endif
);
} }
/* Print chunk stats. */ /* Print huge stats. */
CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
malloc_cprintf(write4, w4opaque, malloc_cprintf(write4, w4opaque,
"huge: nmalloc ndalloc allocated\n"); "huge: nmalloc ndalloc allocated\n");
malloc_cprintf(write4, w4opaque, malloc_cprintf(write4, w4opaque,
@ -320,56 +587,58 @@ stats_print(void (*write4)(void *, const char *, const char *, const char *,
huge_nmalloc, huge_ndalloc, huge_allocated); huge_nmalloc, huge_ndalloc, huge_allocated);
if (merged) { if (merged) {
unsigned nmerged; unsigned narenas;
size_t nactive, ndirty; size_t usz;
arena_stats_t astats;
malloc_bin_stats_t bstats[nbins];
malloc_large_stats_t lstats[((chunksize - PAGE_SIZE) >>
PAGE_SHIFT)];
nactive = 0; usz = sizeof(unsigned);
ndirty = 0; CTL_GET("arenas.narenas", &narenas, unsigned);
memset(&astats, 0, sizeof(astats)); {
memset(bstats, 0, sizeof(bstats)); bool initialized[narenas];
memset(lstats, 0, sizeof(lstats)); size_t isz;
unsigned i, ninitialized;
/* Create merged arena stats. */ isz = sizeof(initialized);
for (i = nmerged = 0; i < narenas; i++) { xmallctl("arenas.initialized", initialized,
arena = arenas[i]; &isz, NULL, 0);
if (arena != NULL) { for (i = ninitialized = 0; i < narenas; i++) {
malloc_mutex_lock(&arena->lock); if (initialized[i])
arena_stats_merge(arena, &nactive, ninitialized++;
&ndirty, &astats, bstats, lstats);
malloc_mutex_unlock(&arena->lock);
nmerged++;
}
} }
if (nmerged > 1) { if (ninitialized > 1) {
/* Print merged arena stats. */ /* Print merged arena stats. */
malloc_cprintf(write4, w4opaque, malloc_cprintf(write4, w4opaque,
"\nMerge arenas stats:\n"); "\nMerge arenas stats:\n");
/* stats_arena_print(write4, w4opaque,
* arenas[0] is used only for invariant bin narenas);
* settings. }
*/
arena_stats_mprint(arenas[0], nactive, ndirty,
&astats, bstats, lstats, bins, large,
write4, w4opaque);
} }
} }
if (unmerged) { if (unmerged) {
unsigned narenas;
size_t usz;
/* Print stats for each arena. */ /* Print stats for each arena. */
usz = sizeof(unsigned);
CTL_GET("arenas.narenas", &narenas, unsigned);
{
bool initialized[narenas];
size_t isz;
unsigned i;
isz = sizeof(initialized);
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = 0; i < narenas; i++) { for (i = 0; i < narenas; i++) {
arena = arenas[i]; if (initialized[i]) {
if (arena != NULL) {
malloc_cprintf(write4, w4opaque, malloc_cprintf(write4, w4opaque,
"\narenas[%u]:\n", i); "\narenas[%u]:\n", i);
malloc_mutex_lock(&arena->lock); stats_arena_print(write4,
arena_stats_print(arena, bins, large, w4opaque, i);
write4, w4opaque); }
malloc_mutex_unlock(&arena->lock);
} }
} }
} }

View File

@ -254,11 +254,13 @@ trace_thread_cleanup(void *arg)
trace_thread_exit(); trace_thread_exit();
} }
void bool
trace_boot(void) trace_boot(void)
{ {
malloc_mutex_init(&trace_mtx); if (malloc_mutex_init(&trace_mtx))
return (true);
/* Flush trace buffers at exit. */ /* Flush trace buffers at exit. */
atexit(malloc_trace_flush_all); atexit(malloc_trace_flush_all);
/* Receive thread exit notifications. */ /* Receive thread exit notifications. */
@ -267,6 +269,8 @@ trace_boot(void)
": Error in pthread_key_create()\n", "", ""); ": Error in pthread_key_create()\n", "", "");
abort(); abort();
} }
return (false);
} }
/******************************************************************************/ /******************************************************************************/
#endif /* JEMALLOC_TRACE */ #endif /* JEMALLOC_TRACE */