diff --git a/jemalloc/Makefile.in b/jemalloc/Makefile.in index 92b8151b..57bfe4d6 100644 --- a/jemalloc/Makefile.in +++ b/jemalloc/Makefile.in @@ -40,10 +40,10 @@ CHDRS := @objroot@src/jemalloc@install_suffix@.h \ CSRCS := @srcroot@src/jemalloc.c @srcroot@src/jemalloc_arena.c \ @srcroot@src/jemalloc_base.c @srcroot@src/jemalloc_chunk.c \ @srcroot@src/jemalloc_chunk_dss.c @srcroot@src/jemalloc_chunk_mmap.c \ - @srcroot@src/jemalloc_chunk_swap.c @srcroot@src/jemalloc_extent.c \ - @srcroot@src/jemalloc_huge.c @srcroot@src/jemalloc_mutex.c \ - @srcroot@src/jemalloc_stats.c @srcroot@src/jemalloc_tcache.c \ - @srcroot@src/jemalloc_trace.c + @srcroot@src/jemalloc_chunk_swap.c @srcroot@src/jemalloc_ctl.c \ + @srcroot@src/jemalloc_extent.c @srcroot@src/jemalloc_huge.c \ + @srcroot@src/jemalloc_mutex.c @srcroot@src/jemalloc_stats.c \ + @srcroot@src/jemalloc_tcache.c @srcroot@src/jemalloc_trace.c DSOS := @objroot@lib/libjemalloc@install_suffix@.so.$(REV) \ @objroot@lib/libjemalloc@install_suffix@.so \ @objroot@lib/libjemalloc@install_suffix@_pic.a diff --git a/jemalloc/doc/jemalloc.3.in b/jemalloc/doc/jemalloc.3.in index 46feab43..f6c1daca 100644 --- a/jemalloc/doc/jemalloc.3.in +++ b/jemalloc/doc/jemalloc.3.in @@ -38,7 +38,7 @@ .\" @(#)malloc.3 8.1 (Berkeley) 6/4/93 .\" $FreeBSD: head/lib/libc/stdlib/malloc.3 182225 2008-08-27 02:00:53Z jasone $ .\" -.Dd January 23, 2010 +.Dd January 27, 2010 .Dt JEMALLOC 3 .Os .Sh NAME @@ -48,12 +48,13 @@ .Nm @jemalloc_prefix@realloc , .Nm @jemalloc_prefix@free , .Nm @jemalloc_prefix@malloc_usable_size , -@roff_swap@.Nm @jemalloc_prefix@malloc_swap_enable , -@roff_tcache@.Nm @jemalloc_prefix@malloc_tcache_flush , -.Nm @jemalloc_prefix@malloc_stats_print +.Nm @jemalloc_prefix@malloc_stats_print , +.Nm @jemalloc_prefix@mallctl , +.Nm @jemalloc_prefix@mallctlnametomib , +.Nm @jemalloc_prefix@mallctlbymib .Nd general purpose memory allocation functions .Sh LIBRARY -.Lb libjemalloc@install_suffix@ +.Sy libjemalloc@install_suffix@ .Sh SYNOPSIS .In stdlib.h .In jemalloc@install_suffix@.h @@ -69,18 +70,18 @@ .Fn @jemalloc_prefix@free "void *ptr" .Ft size_t .Fn @jemalloc_prefix@malloc_usable_size "const void *ptr" -@roff_swap@.Ft int -@roff_swap@.Fn @jemalloc_prefix@malloc_swap_enable "const int *fds" "unsigned nfds" "int prezeroed" -@roff_tcache@.Ft void -@roff_tcache@.Fn @jemalloc_prefix@malloc_tcache_flush "void" .Ft void .Fn @jemalloc_prefix@malloc_stats_print "void (*write4)(void *" "const char *" "const char *" "const char *" "const char *)" "const char *opts" +.Ft int +.Fn @jemalloc_prefix@mallctl "const char *name" "void *oldp" "size_t *oldlenp" "void *newp" "size_t newlen" +.Ft int +.Fn @jemalloc_prefix@mallctlnametomib "const char *name" "size_t *mibp" "size_t *miblenp" +.Ft int +.Fn @jemalloc_prefix@mallctlbymib "const size_t *mib" "size_t miblen" "void *oldp" "size_t *oldlenp" "void *newp" "size_t newlen" .Ft const char * .Va @jemalloc_prefix@malloc_options ; .Ft void -.Fo \*(lp*@jemalloc_prefix@malloc_message\*(rp -.Fa "void *w4opaque" "const char *p1" "const char *p2" "const char *p3" "const char *p4" -.Fc +.Fn \*(lp*@jemalloc_prefix@malloc_message\*(rp "void *w4opaque" "const char *p1" "const char *p2" "const char *p3" "const char *p4" .Sh DESCRIPTION The .Fn @jemalloc_prefix@malloc @@ -173,39 +174,6 @@ Any discrepancy between the requested allocation size and the size reported by .Fn @jemalloc_prefix@malloc_usable_size should not be depended on, since such behavior is entirely implementation-dependent.
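For illustration, a minimal sketch of the above caveat (assuming an empty @jemalloc_prefix@ and the default jemalloc@install_suffix@.h install name; both vary with build configuration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <jemalloc.h>

    int
    main(void)
    {
        void *p = malloc(100);

        if (p == NULL)
            return (1);
        /*
         * The usable size is at least the requested 100 bytes, but the
         * exact value depends on size class rounding; query it rather
         * than assuming it.
         */
        printf("usable size: %zu\n", malloc_usable_size(p));
        free(p);
        return (0);
    }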
-@roff_swap@.Pp -@roff_swap@The -@roff_swap@.Fn @jemalloc_prefix@malloc_swap_enable -@roff_swap@function opens and contiguously maps a list of -@roff_swap@.Fa nfds -@roff_swap@file descriptors pointed to by -@roff_swap@.Fa fds -@roff_swap@via -@roff_swap@.Xr mmap 2 . -@roff_swap@The resulting virtual memory region is preferred over anonymous -@roff_swap@.Xr mmap 2 -@roff_swap@@roff_dss@and -@roff_swap@@roff_dss@.Xr sbrk 2 -@roff_swap@memory. -@roff_swap@Note that if a file's size is not a multiple of the page size, it is -@roff_swap@automatically truncated to the nearest page size multiple. -@roff_swap@If -@roff_swap@.Fa prezeroed -@roff_swap@is non-zero, the allocator assumes that the file(s) contain nothing -@roff_swap@but nil bytes. -@roff_swap@If this assumption is violated, allocator behavior is undefined. -@roff_tcache@.Pp -@roff_tcache@The -@roff_tcache@.Fn @jemalloc_prefix@malloc_tcache_flush -@roff_tcache@function releases all cached objects and internal data structures -@roff_tcache@associated with the calling thread's thread-specific cache. -@roff_tcache@Ordinarily, this function need not be called, since automatic -@roff_tcache@periodic incremental garbage collection occurs, and the thread -@roff_tcache@cache is automatically discarded when a thread exits. -@roff_tcache@However, garbage collection is triggered by allocation activity, -@roff_tcache@so it is possible for a thread that stops allocating/deallocating -@roff_tcache@to retain its cache indefinitely, in which case the developer may -@roff_tcache@find this function useful. .Pp The .Fn @jemalloc_prefix@malloc_stats_print @@ -228,6 +196,12 @@ during execution can be omitted by specifying as a character within the .Fa opts string. +Note that +.Fn @jemalloc_prefix@malloc_stats_print +uses the +.Fn @jemalloc_prefix@mallctl* +functions internally, so inconsistent statistics can be reported if multiple +threads use these functions simultaneously. @roff_stats@.Dq m @roff_stats@and @roff_stats@.Dq a @@ -242,6 +216,79 @@ Unrecognized characters are silently ignored. @roff_tcache@Note that thread caching may prevent some statistics from being @roff_tcache@completely up to date, since extra locking would be required to @roff_tcache@merge counters that track thread cache operations. +.Pp +The +.Fn @jemalloc_prefix@mallctl +function provides a general interface for introspecting the memory allocator, +as well as setting modifiable parameters and triggering actions. +The period-separated +.Fa name +argument specifies a location in a tree-structured namespace; see the +.Sx "MALLCTL NAMESPACE" +section for documentation on the tree contents. +To read a value, pass a pointer via +.Fa oldp +to adequate space to contain the value, and a pointer to its length via +.Fa oldlenp ; +otherwise pass +.Dv NULL +and +.Dv NULL . +Similarly, to write a value, pass a pointer to the value via +.Fa newp , +and its length via +.Fa newlen ; +otherwise pass +.Dv NULL +and 0.
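To make those read and write conventions concrete, a minimal sketch of mallctl usage (again assuming an empty @jemalloc_prefix@ and the default header name; "stats.allocated" additionally requires a --enable-stats build):

    #include <stdio.h>
    #include <stdint.h>
    #include <jemalloc.h>

    int
    main(void)
    {
        uint64_t epoch = 1;
        size_t allocated, len;

        /* Write: setting "epoch" refreshes the statistics snapshot. */
        if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0)
            return (1);
        /* Read: pass pointers to the result buffer and to its length. */
        len = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &len, NULL, 0) != 0)
            return (1);
        printf("allocated: %zu\n", allocated);
        return (0);
    }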
+.Pp +The +.Fn @jemalloc_prefix@mallctlnametomib +function provides a way to avoid repeated name lookups for applications that +repeatedly query the same portion of the namespace, by translating a name to a +.Dq Management Information Base +(MIB) that can be passed repeatedly to +.Fn @jemalloc_prefix@mallctlbymib . +Upon successful return from +.Fn @jemalloc_prefix@mallctlnametomib , +.Fa mibp +contains an array of +.Fa *miblenp +integers, where +.Fa *miblenp +is the lesser of the number of components in +.Fa name +and the input value of +.Fa *miblenp . +Thus it is possible to pass a +.Fa *miblenp +that is smaller than the number of period-separated name components, which +results in a partial MIB that can be used as the basis for constructing a +complete MIB. +For name components that are integers (e.g. the 2 in +.Qq arenas.bin.2.size ) , +the corresponding MIB component will always be that integer. +Therefore, it is legitimate to construct code like the following: +.Pp +.Bd -literal -offset indent -compact +unsigned nbins, i; +size_t mib[4]; +size_t len, miblen; + +len = sizeof(nbins); +@jemalloc_prefix@mallctl("arenas.nbins", &nbins, &len, NULL, 0); + +miblen = 4; +@jemalloc_prefix@mallctlnametomib("arenas.bin.0.size", mib, &miblen); +for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + @jemalloc_prefix@mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); + /* Do something with bin_size... */ +} +.Ed .Sh TUNING Once, when the first call is made to one of these memory allocation routines, various flags will be set or reset, which affects the @@ -347,8 +394,8 @@ times the number of CPUs, or one if there is a single CPU. @roff_swap@@roff_dss@.Xr sbrk 2 @roff_swap@for virtual memory allocation. @roff_swap@In order for overcommit to be disabled, the -@roff_swap@.Fn malloc_swap_enable -@roff_swap@function must have been successfully called. +@roff_swap@.Dq swap.fds +@roff_swap@mallctl must have been successfully written to. @roff_swap@This option is enabled by default. .It P The @@ -503,7 +550,7 @@ no more than the minimum cacheline-multiple size class (see the option) are rounded up to the nearest multiple of the @roff_tiny@quantum. @roff_no_tiny@quantum (8 or 16, depending on architecture). -Allocation requests that are more than the minumum cacheline-multiple size +Allocation requests that are more than the minimum cacheline-multiple size class, but no more than the minimum subpage-multiple size class (see the .Dq C option) are rounded up to the nearest multiple of the cacheline size (64). @@ -528,6 +575,592 @@ multi-threaded applications. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size. +.Sh MALLCTL NAMESPACE +The following names are defined in the namespace accessible via the +.Fn mallctl* +functions. +Value types are specified in parentheses, and their readable/writable statuses +are encoded as rw, r-, -w, or --. +A name element encoded as <i> or <j> indicates an integer component, where the +integer varies from 0 to some upper value that must be determined via +introspection. +@roff_stats@In the case of +@roff_stats@.Dq stats.arenas.<i>.* , +@roff_stats@<i> equal to +@roff_stats@.Dq arenas.narenas +@roff_stats@can be used to access the summation of statistics from all arenas. +.Bl -ohang +.\"----------------------------------------------------------------------------- +.It Sy "epoch (uint64_t) rw" +.Bd -ragged -offset indent -compact +If a value is passed in, refresh the data from which the +.Fn mallctl* +functions report values, and increment the epoch. +Return the current epoch. +This is useful for detecting whether another thread caused a refresh. +.Ed +.\"----------------------------------------------------------------------------- +@roff_tcache@.It Sy "tcache.flush (void) --" +@roff_tcache@.Bd -ragged -offset indent -compact +@roff_tcache@Flush calling thread's tcache.
+@roff_tcache@This interface releases all cached objects and internal data +@roff_tcache@structures associated with the calling thread's thread-specific +@roff_tcache@cache. +@roff_tcache@Ordinarily, this interface need not be called, since automatic +@roff_tcache@periodic incremental garbage collection occurs, and the thread +@roff_tcache@cache is automatically discarded when a thread exits. +@roff_tcache@However, garbage collection is triggered by allocation activity, +@roff_tcache@so it is possible for a thread that stops allocating/deallocating +@roff_tcache@to retain its cache indefinitely, in which case the developer may +@roff_tcache@find manual flushing useful. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.debug (bool) r-" +.Bd -ragged -offset indent -compact +--enable-debug was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.dss (bool) r-" +.Bd -ragged -offset indent -compact +--enable-dss was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.dynamic_page_shift (bool) r-" +.Bd -ragged -offset indent -compact +--enable-dynamic-page-shift was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.fill (bool) r-" +.Bd -ragged -offset indent -compact +--enable-fill was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.lazy_lock (bool) r-" +.Bd -ragged -offset indent -compact +--enable-lazy-lock was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.stats (bool) r-" +.Bd -ragged -offset indent -compact +--enable-stats was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.swap (bool) r-" +.Bd -ragged -offset indent -compact +--enable-swap was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.sysv (bool) r-" +.Bd -ragged -offset indent -compact +--enable-sysv was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.tcache (bool) r-" +.Bd -ragged -offset indent -compact +--disable-tcache was not specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.tiny (bool) r-" +.Bd -ragged -offset indent -compact +--disable-tiny was not specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.tls (bool) r-" +.Bd -ragged -offset indent -compact +--disable-tls was not specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.trace (bool) r-" +.Bd -ragged -offset indent -compact +--enable-trace was specified during build configuration. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "config.xmalloc (bool) r-" +.Bd -ragged -offset indent -compact +--enable-xmalloc was specified during build configuration. 
+.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.abort (bool) r-" +.Bd -ragged -offset indent -compact +See the +.Dq A +option. +.Ed +.\"----------------------------------------------------------------------------- +@roff_fill@.It Sy "opt.junk (bool) r-" +@roff_fill@.Bd -ragged -offset indent -compact +@roff_fill@See the +@roff_fill@.Dq J +@roff_fill@option. +@roff_fill@.Ed +.\"----------------------------------------------------------------------------- +@roff_fill@.It Sy "opt.zero (bool) r-" +@roff_fill@.Bd -ragged -offset indent -compact +@roff_fill@See the +@roff_fill@.Dq Z +@roff_fill@option. +@roff_fill@.Ed +.\"----------------------------------------------------------------------------- +@roff_xmalloc@.It Sy "opt.xmalloc (bool) r-" +@roff_xmalloc@.Bd -ragged -offset indent -compact +@roff_xmalloc@See the +@roff_xmalloc@.Dq X +@roff_xmalloc@option. +@roff_xmalloc@.Ed +.\"----------------------------------------------------------------------------- +@roff_tcache@.It Sy "opt.lg_tcache_nslots (size_t) r-" +@roff_tcache@.Bd -ragged -offset indent -compact +@roff_tcache@See the +@roff_tcache@.Dq H +@roff_tcache@option. +@roff_tcache@.Ed +.\"----------------------------------------------------------------------------- +@roff_tcache@.It Sy "opt.lg_tcache_gc_sweep (ssize_t) r-" +@roff_tcache@.Bd -ragged -offset indent -compact +@roff_tcache@See the +@roff_tcache@.Dq G +@roff_tcache@option. +@roff_tcache@.Ed +.\"----------------------------------------------------------------------------- +@roff_tcache@.It Sy "opt.tcache_sort (bool) r-" +@roff_tcache@.Bd -ragged -offset indent -compact +@roff_tcache@See the +@roff_tcache@.Dq S +@roff_tcache@option. +@roff_tcache@.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.stats_print (bool) r-" +.Bd -ragged -offset indent -compact +See the +.Dq P +option. +.Ed +.\"----------------------------------------------------------------------------- +@roff_trace@.It Sy "opt.trace (bool) r-" +@roff_trace@.Bd -ragged -offset indent -compact +@roff_trace@See the +@roff_trace@.Dq T +@roff_trace@option. +@roff_trace@.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.lg_qspace_max (size_t) r-" +.Bd -ragged -offset indent -compact +See the +.Dq Q +option. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.lg_cspace_max (size_t) r-" +.Bd -ragged -offset indent -compact +See the +.Dq C +option. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.lg_medium_max (size_t) r-" +.Bd -ragged -offset indent -compact +See the +.Dq M +option. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.lg_dirty_mult (ssize_t) r-" +.Bd -ragged -offset indent -compact +See the +.Dq D +option. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.lg_chunk (size_t) r-" +.Bd -ragged -offset indent -compact +See the +.Dq K +option. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "opt.overcommit (bool) r-" +.Bd -ragged -offset indent -compact +See the +.Dq O +option. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.narenas (unsigned) r-" +.Bd -ragged -offset indent -compact +Maximum number of arenas. +See the +.Dq N +option. 
+.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.initialized (bool *) r-" +.Bd -ragged -offset indent -compact +An array of arenas.narenas booleans. +Each boolean indicates whether the corresponding arena is initialized. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.quantum (size_t) r-" +.Bd -ragged -offset indent -compact +Quantum size. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.cacheline (size_t) r-" +.Bd -ragged -offset indent -compact +Assumed cacheline size. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.subpage (size_t) r-" +.Bd -ragged -offset indent -compact +Subpage size class interval. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.pagesize (size_t) r-" +.Bd -ragged -offset indent -compact +Page size. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.medium (size_t) r-" +.Bd -ragged -offset indent -compact +Medium size class interval. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.chunksize (size_t) r-" +.Bd -ragged -offset indent -compact +Chunk size. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.tspace_min (size_t) r-" +.Bd -ragged -offset indent -compact +Minimum tiny size class. +Tiny size classes are powers of two. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.tspace_max (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum tiny size class. +Tiny size classes are powers of two. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.qspace_min (size_t) r-" +.Bd -ragged -offset indent -compact +Minimum quantum-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.qspace_max (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum quantum-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.cspace_min (size_t) r-" +.Bd -ragged -offset indent -compact +Minimum cacheline-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.cspace_max (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum cacheline-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.sspace_min (size_t) r-" +.Bd -ragged -offset indent -compact +Minimum subpage-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.sspace_max (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum subpage-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.medium_min (size_t) r-" +.Bd -ragged -offset indent -compact +Minimum medium-spaced size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.medium_max (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum medium-spaced size class. 
+.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.ntbins (unsigned) r-" +.Bd -ragged -offset indent -compact +Number of tiny bin size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.nqbins (unsigned) r-" +.Bd -ragged -offset indent -compact +Number of quantum-spaced bin size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.ncbins (unsigned) r-" +.Bd -ragged -offset indent -compact +Number of cacheline-spaced bin size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.nsbins (unsigned) r-" +.Bd -ragged -offset indent -compact +Number of subpage-spaced bin size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.nmbins (unsigned) r-" +.Bd -ragged -offset indent -compact +Number of medium-spaced bin size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.nbins (unsigned) r-" +.Bd -ragged -offset indent -compact +Total number of bin size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.bin.<i>.size (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum size supported by size class. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.bin.<i>.nregs (uint32_t) r-" +.Bd -ragged -offset indent -compact +Number of regions per page run. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.bin.<i>.run_size (size_t) r-" +.Bd -ragged -offset indent -compact +Number of bytes per page run. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.nlruns (size_t) r-" +.Bd -ragged -offset indent -compact +Total number of large size classes. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "arenas.lrun.<i>.size (size_t) r-" +.Bd -ragged -offset indent -compact +Maximum size supported by this large size class. +.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.allocated (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Total number of bytes allocated by the application. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.active (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Total number of bytes in active pages allocated by the application. +@roff_stats@This is a multiple of the page size, and is larger than +@roff_stats@.Dq stats.allocated . +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.mapped (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Total number of bytes in chunks mapped on behalf of the application. +@roff_stats@This is a multiple of the chunk size, and is at least as large as +@roff_stats@.Dq stats.active . +@roff_stats@@roff_swap@This does not include inactive chunks backed by swap +@roff_stats@@roff_swap@files. +@roff_stats@@roff_dss@This does not include inactive chunks embedded in the DSS.
+@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.chunks.current (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Total number of chunks actively mapped on behalf of the application. +@roff_stats@@roff_swap@This does not include inactive chunks backed by swap +@roff_stats@@roff_swap@files. +@roff_stats@@roff_dss@This does not include inactive chunks embedded in the DSS. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.chunks.total (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of chunks allocated. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.chunks.high (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Maximum number of active chunks at any time thus far. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.huge.allocated (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of bytes currently allocated by huge objects. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.huge.nmalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of huge allocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.huge.ndalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of huge deallocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +.It Sy "stats.arenas.<i>.pactive (size_t) r-" +.Bd -ragged -offset indent -compact +Number of pages in active runs. +.Ed +.\"----------------------------------------------------------------------------- +.It Sy "stats.arenas.<i>.pdirty (size_t) r-" +.Bd -ragged -offset indent -compact +Number of pages within unused runs that are potentially dirty, and for which +.Fn madvise "..." "MADV_DONTNEED" +has not been called. +.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.mapped (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of mapped bytes. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.npurge (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of dirty page purge sweeps performed. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.nmadvise (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of +@roff_stats@.Fn madvise "..." "MADV_DONTNEED" +@roff_stats@calls made to purge dirty pages. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.purged (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of pages purged.
+@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.small.allocated (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of bytes currently allocated by small objects. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.small.nmalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of small allocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.small.ndalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of small deallocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.medium.allocated (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of bytes currently allocated by medium objects. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.medium.nmalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of medium allocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.medium.ndalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of medium deallocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.large.allocated (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Number of bytes currently allocated by large objects. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.large.nmalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of large allocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.large.ndalloc (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of large deallocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nrequests (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of allocation requests. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@@roff_tcache@.It Sy "stats.arenas.<i>.bins.<j>.nfills (uint64_t) r-" +@roff_stats@@roff_tcache@.Bd -ragged -offset indent -compact +@roff_stats@@roff_tcache@Cumulative number of tcache fills. +@roff_stats@@roff_tcache@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@@roff_tcache@.It Sy "stats.arenas.<i>.bins.<j>.nflushes (uint64_t) r-" +@roff_stats@@roff_tcache@.Bd -ragged -offset indent -compact +@roff_stats@@roff_tcache@Cumulative number of tcache flushes.
+@roff_stats@@roff_tcache@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nruns (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of runs created. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.nreruns (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of times the current run from which to allocate +@roff_stats@changed. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.highruns (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Maximum number of runs at any time thus far. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.bins.<j>.curruns (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Current number of runs. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.nrequests (uint64_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Cumulative number of allocation requests for this size class. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.highruns (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Maximum number of runs at any time thus far for this size class. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@.It Sy "stats.arenas.<i>.lruns.<j>.curruns (size_t) r-" +@roff_stats@.Bd -ragged -offset indent -compact +@roff_stats@Current number of runs for this size class. +@roff_stats@.Ed +.\"----------------------------------------------------------------------------- +@roff_stats@@roff_swap@.It Sy "swap.avail (size_t) r-" +@roff_stats@@roff_swap@.Bd -ragged -offset indent -compact +@roff_stats@@roff_swap@Number of swap file bytes that are currently not +@roff_stats@@roff_swap@associated with any chunk (i.e. mapped, but otherwise +@roff_stats@@roff_swap@completely unmanaged). +@roff_stats@@roff_swap@.Ed +.\"----------------------------------------------------------------------------- +@roff_swap@.It Sy "swap.prezeroed (bool) rw" +@roff_swap@.Bd -ragged -offset indent -compact +@roff_swap@If true, the allocator assumes that the swap file(s) contain nothing +@roff_swap@but nil bytes. +@roff_swap@If this assumption is violated, allocator behavior is undefined. +@roff_swap@This value becomes read-only after +@roff_swap@.Dq swap.fds +@roff_swap@is successfully written to. +@roff_swap@.Ed +.\"----------------------------------------------------------------------------- +@roff_swap@.It Sy "swap.nfds (size_t) r-" +@roff_swap@.Bd -ragged -offset indent -compact +@roff_swap@Number of file descriptors in use for swap. +@roff_swap@.Ed +.\"----------------------------------------------------------------------------- +@roff_swap@.It Sy "swap.fds (int *) rw" +@roff_swap@.Bd -ragged -offset indent -compact +@roff_swap@When written to, the files associated with the specified file +@roff_swap@descriptors are contiguously mapped via +@roff_swap@.Xr mmap 2 .
+@roff_swap@The resulting virtual memory region is preferred over anonymous +@roff_swap@.Xr mmap 2 +@roff_swap@@roff_dss@and +@roff_swap@@roff_dss@.Xr sbrk 2 +@roff_swap@memory. +@roff_swap@Note that if a file's size is not a multiple of the page size, it is +@roff_swap@automatically truncated to the nearest page size multiple. +@roff_swap@See the +@roff_swap@.Dq swap.prezeroed +@roff_swap@interface for specifying that the files are pre-zeroed. +@roff_swap@.Ed +.\"----------------------------------------------------------------------------- +.El .Sh DEBUGGING MALLOC PROBLEMS The first thing to do is to set the .Dq A @@ -646,11 +1279,43 @@ The .Fn @jemalloc_prefix@malloc_usable_size function returns the usable size of the allocation pointed to by .Fa ptr . -@roff_swap@.Pp -@roff_swap@The -@roff_swap@.Fn @jemalloc_prefix@malloc_swap_enable -@roff_swap@function returns the value 0 if successful; otherwise it returns a -@roff_swap@non-zero value. +.Pp +The +.Fn @jemalloc_prefix@mallctl , +.Fn @jemalloc_prefix@mallctlnametomib , +and +.Fn @jemalloc_prefix@mallctlbymib +functions return 0 on success; otherwise they return an error value. +The functions will fail if: +.Bl -tag -width Er +.It Bq Er EINVAL +.Fa newp +is +.Dv non-NULL , +and +.Fa newlen +is too large or too small. +Alternatively, +.Fa *oldlenp +is too large or too small; in this case as much data as possible are read +despite the error. +.It Bq Er ENOMEM +.Fa *oldlenp +is too short to hold the requested value. +.It Bq Er ENOENT +.Fa name +or +.Fa mib +specifies an unknown/invalid value. +.It Bq Er EPERM +Attempt to read or write void value, or attempt to write read-only value. +.It Bq Er EAGAIN +A memory allocation failure occurred. +.It Bq Er EFAULT +An interface with side effects failed in some way not directly related to +.Fn mallctl* +read/write processing. 
+.El .Sh ENVIRONMENT The following environment variables affect the execution of the allocation functions: diff --git a/jemalloc/src/internal/jemalloc_arena.h b/jemalloc/src/internal/jemalloc_arena.h index c4e63c5d..3c1947d4 100644 --- a/jemalloc/src/internal/jemalloc_arena.h +++ b/jemalloc/src/internal/jemalloc_arena.h @@ -391,6 +391,8 @@ extern size_t medium_max; extern size_t lg_mspace; extern size_t mspace_mask; +#define nlclasses ((chunksize - PAGE_SIZE) >> PAGE_SHIFT) + #ifdef JEMALLOC_TCACHE void arena_tcache_fill(arena_t *arena, tcache_bin_t *tbin, size_t binind); #endif @@ -407,14 +409,6 @@ void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats); -void arena_stats_mprint(arena_t *arena, size_t nactive, size_t ndirty, - const arena_stats_t *astats, const malloc_bin_stats_t *bstats, - const malloc_large_stats_t *lstats, bool bins, bool large, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque); -void arena_stats_print(arena_t *arena, bool bins, bool large, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque); #endif void *arena_ralloc(void *ptr, size_t size, size_t oldsize); bool arena_new(arena_t *arena, unsigned ind); diff --git a/jemalloc/src/internal/jemalloc_chunk.h b/jemalloc/src/internal/jemalloc_chunk.h index 13b72ed4..40541e7a 100644 --- a/jemalloc/src/internal/jemalloc_chunk.h +++ b/jemalloc/src/internal/jemalloc_chunk.h @@ -33,6 +33,8 @@ extern bool opt_overcommit; #endif #ifdef JEMALLOC_STATS +/* Protects stats_chunks; currently not used for any other purpose. */ +extern malloc_mutex_t chunks_mtx; /* Chunk statistics. */ extern chunk_stats_t stats_chunks; #endif diff --git a/jemalloc/src/internal/jemalloc_chunk_swap.h b/jemalloc/src/internal/jemalloc_chunk_swap.h index 5bdf3073..d50cb197 100644 --- a/jemalloc/src/internal/jemalloc_chunk_swap.h +++ b/jemalloc/src/internal/jemalloc_chunk_swap.h @@ -12,6 +12,9 @@ extern malloc_mutex_t swap_mtx; extern bool swap_enabled; +extern bool swap_prezeroed; +extern size_t swap_nfds; +extern int *swap_fds; #ifdef JEMALLOC_STATS extern size_t swap_avail; #endif diff --git a/jemalloc/src/internal/jemalloc_ctl.h b/jemalloc/src/internal/jemalloc_ctl.h new file mode 100644 index 00000000..64a620a7 --- /dev/null +++ b/jemalloc/src/internal/jemalloc_ctl.h @@ -0,0 +1,108 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ctl_node_s ctl_node_t; +typedef struct ctl_arena_stats_s ctl_arena_stats_t; +typedef struct ctl_stats_s ctl_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ctl_node_s { + bool named; + union { + struct { + const char *name; + /* If (nchildren == 0), this is a terminal node. */ + unsigned nchildren; + const ctl_node_t *children; + } named; + struct { + const ctl_node_t *(*index)(const size_t *, size_t, + size_t); + } indexed; + } u; + int (*ctl)(const size_t *, size_t, void *, size_t *, void *, + size_t); +}; + +struct ctl_arena_stats_s { + bool initialized; + size_t pactive; + size_t pdirty; +#ifdef JEMALLOC_STATS + arena_stats_t astats; + malloc_bin_stats_t *bstats; /* nbins elements. */ + malloc_large_stats_t *lstats; /* nlclasses elements. 
*/ +#endif +}; + +struct ctl_stats_s { +#ifdef JEMALLOC_STATS + size_t allocated; + size_t active; + size_t mapped; + struct { + size_t current; /* stats_chunks.curchunks */ + uint64_t total; /* stats_chunks.nchunks */ + size_t high; /* stats_chunks.highchunks */ + } chunks; + struct { + size_t allocated; /* huge_allocated */ + uint64_t nmalloc; /* huge_nmalloc */ + uint64_t ndalloc; /* huge_ndalloc */ + } huge; +#endif + ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ +#ifdef JEMALLOC_SWAP + size_t swap_avail; +#endif +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen); +int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); + +int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +bool ctl_boot(void); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ + if (mallctl(name, oldp, oldlenp, newp, newlen) != 0) { \ + malloc_write4("<jemalloc>: Invalid xmallctl(\"", name, \ + "\", ...) call\n", ""); \ + abort(); \ + } \ +} while (0) + +#define xmallctlnametomib(name, mibp, miblenp) do { \ + if (mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_write4( \ + "<jemalloc>: Invalid xmallctlnametomib(\"", name, \ + "\", ...) call\n", ""); \ + abort(); \ + } \ +} while (0) + +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ + if (mallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_write4( \ + "<jemalloc>: Invalid xmallctlbymib() call\n", "", \ + "", ""); \ + abort(); \ + } \ +} while (0) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + diff --git a/jemalloc/src/internal/jemalloc_internal.h.in b/jemalloc/src/internal/jemalloc_internal.h.in index af6543e1..e8326e30 100644 --- a/jemalloc/src/internal/jemalloc_internal.h.in +++ b/jemalloc/src/internal/jemalloc_internal.h.in @@ -170,6 +170,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *w4opaque, const char *p1, (((s) + PAGE_MASK) & ~PAGE_MASK) #include "internal/jemalloc_stats.h" +#include "internal/jemalloc_ctl.h" #include "internal/jemalloc_mutex.h" #include "internal/jemalloc_extent.h" #include "internal/jemalloc_arena.h" @@ -184,6 +185,7 @@ extern void (*JEMALLOC_P(malloc_message))(void *w4opaque, const char *p1, #define JEMALLOC_H_STRUCTS #include "internal/jemalloc_stats.h" +#include "internal/jemalloc_ctl.h" #include "internal/jemalloc_mutex.h" #include "internal/jemalloc_extent.h" #include "internal/jemalloc_arena.h" @@ -220,6 +222,7 @@ extern size_t lg_pagesize; /* Number of CPUs. */ extern unsigned ncpus; +extern malloc_mutex_t arenas_lock; /* Protects arenas initialization.
*/ #ifndef NO_TLS /* * Map of pthread_self() --> arenas[???], used for selecting an arena to use @@ -240,6 +243,7 @@ arena_t *choose_arena_hard(void); #endif #include "internal/jemalloc_stats.h" +#include "internal/jemalloc_ctl.h" #include "internal/jemalloc_mutex.h" #include "internal/jemalloc_extent.h" #include "internal/jemalloc_arena.h" @@ -254,6 +258,7 @@ arena_t *choose_arena_hard(void); #define JEMALLOC_H_INLINES #include "internal/jemalloc_stats.h" +#include "internal/jemalloc_ctl.h" #include "internal/jemalloc_mutex.h" #include "internal/jemalloc_extent.h" #include "internal/jemalloc_base.h" diff --git a/jemalloc/src/internal/jemalloc_trace.h b/jemalloc/src/internal/jemalloc_trace.h index 624d08cf..e474ed03 100644 --- a/jemalloc/src/internal/jemalloc_trace.h +++ b/jemalloc/src/internal/jemalloc_trace.h @@ -21,7 +21,7 @@ void trace_free(const void *ptr, size_t size); void trace_malloc_usable_size(size_t size, const void *ptr); void trace_thread_exit(void); -void trace_boot(void); +bool trace_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c index a18a1571..b807a22d 100644 --- a/jemalloc/src/jemalloc.c +++ b/jemalloc/src/jemalloc.c @@ -95,12 +95,12 @@ /******************************************************************************/ /* Data. */ +malloc_mutex_t arenas_lock; arena_t **arenas; unsigned narenas; #ifndef NO_TLS static unsigned next_arena; #endif -static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */ #ifndef NO_TLS __thread arena_t *arenas_map JEMALLOC_ATTR(tls_model("initial-exec")); @@ -710,9 +710,18 @@ MALLOC_OUT: } } + if (ctl_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + #ifdef JEMALLOC_TRACE - if (opt_trace) - trace_boot(); + if (opt_trace) { + if (trace_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + } #endif if (opt_stats_print) { /* Print statistics at exit. */ @@ -722,6 +731,11 @@ MALLOC_OUT: /* Register fork handlers. */ pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork); + if (base_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + if (arena_boot0()) { malloc_mutex_unlock(&init_lock); return (true); @@ -742,11 +756,6 @@ MALLOC_OUT: return (true); } - if (huge_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). 
@@ -1222,22 +1231,6 @@ JEMALLOC_P(malloc_swap_enable)(const int *fds, unsigned nfds, int prezeroed) } #endif -#ifdef JEMALLOC_TCACHE -JEMALLOC_ATTR(visibility("default")) -void -JEMALLOC_P(malloc_tcache_flush)(void) -{ - tcache_t *tcache; - - tcache = tcache_tls; - if (tcache == NULL) - return; - - tcache_destroy(tcache); - tcache_tls = NULL; -} -#endif - JEMALLOC_ATTR(visibility("default")) void JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *, @@ -1247,6 +1240,32 @@ JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *, stats_print(write4, w4opaque, opts); } +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + + return (ctl_byname(name, oldp, oldlenp, newp, newlen)); +} + +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp) +{ + + return (ctl_nametomib(name, mibp, miblenp)); +} + +JEMALLOC_ATTR(visibility("default")) +int +JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + + return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); +} + /* * End non-standard functions. */ diff --git a/jemalloc/src/jemalloc.h.in b/jemalloc/src/jemalloc.h.in index dee8d823..baa84594 100644 --- a/jemalloc/src/jemalloc.h.in +++ b/jemalloc/src/jemalloc.h.in @@ -21,16 +21,15 @@ void *JEMALLOC_P(realloc)(void *ptr, size_t size); void JEMALLOC_P(free)(void *ptr); size_t JEMALLOC_P(malloc_usable_size)(const void *ptr); -#ifdef JEMALLOC_SWAP -int JEMALLOC_P(malloc_swap_enable)(const int *fds, unsigned nfds, - int prezeroed); -#endif -#ifdef JEMALLOC_TCACHE -void JEMALLOC_P(malloc_tcache_flush)(void); -#endif void JEMALLOC_P(malloc_stats_print)(void (*write4)(void *, const char *, const char *, const char *, const char *), void *w4opaque, const char *opts); +int JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +int JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, + size_t *miblenp); +int JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); #ifdef __cplusplus }; diff --git a/jemalloc/src/jemalloc_arena.c b/jemalloc/src/jemalloc_arena.c index e8b21d75..fa84f664 100644 --- a/jemalloc/src/jemalloc_arena.c +++ b/jemalloc/src/jemalloc_arena.c @@ -172,19 +172,6 @@ static void *arena_malloc_large(arena_t *arena, size_t size, bool zero); static bool arena_is_large(const void *ptr); static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin); -#ifdef JEMALLOC_STATS -static void arena_stats_aprint(size_t nactive, size_t ndirty, - const arena_stats_t *astats, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque); -static void arena_stats_bprint(arena_t *arena, - const malloc_bin_stats_t *bstats, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque); -static void arena_stats_lprint(const malloc_large_stats_t *lstats, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque); -#endif static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, size_t size, size_t oldsize); static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, @@ -1570,7 +1557,7 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, 
arena_stats_t *astats, malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) { - unsigned i, nlclasses; + unsigned i; *nactive += arena->nactive; *ndirty += arena->ndirty; @@ -1601,193 +1588,12 @@ arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, bstats[i].curruns += arena->bins[i].stats.curruns; } - for (i = 0, nlclasses = (chunksize - PAGE_SIZE) >> PAGE_SHIFT; - i < nlclasses; - i++) { + for (i = 0; i < nlclasses; i++) { lstats[i].nrequests += arena->stats.lstats[i].nrequests; lstats[i].highruns += arena->stats.lstats[i].highruns; lstats[i].curruns += arena->stats.lstats[i].curruns; } } - -static void -arena_stats_aprint(size_t nactive, size_t ndirty, const arena_stats_t *astats, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque) -{ - - malloc_cprintf(write4, w4opaque, - "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," - " %"PRIu64" madvise%s, %"PRIu64" purged\n", - nactive, ndirty, - astats->npurge, astats->npurge == 1 ? "" : "s", - astats->nmadvise, astats->nmadvise == 1 ? "" : "s", - astats->purged); - - malloc_cprintf(write4, w4opaque, - " allocated nmalloc ndalloc\n"); - malloc_cprintf(write4, w4opaque, - "small: %12zu %12"PRIu64" %12"PRIu64"\n", - astats->allocated_small, astats->nmalloc_small, - astats->ndalloc_small); - malloc_cprintf(write4, w4opaque, - "medium: %12zu %12"PRIu64" %12"PRIu64"\n", - astats->allocated_medium, astats->nmalloc_medium, - astats->ndalloc_medium); - malloc_cprintf(write4, w4opaque, - "large: %12zu %12"PRIu64" %12"PRIu64"\n", - astats->allocated_large, astats->nmalloc_large, - astats->ndalloc_large); - malloc_cprintf(write4, w4opaque, - "total: %12zu %12"PRIu64" %12"PRIu64"\n", - astats->allocated_small + astats->allocated_medium + - astats->allocated_large, astats->nmalloc_small + - astats->nmalloc_medium + astats->nmalloc_large, - astats->ndalloc_small + astats->ndalloc_medium + - astats->ndalloc_large); - malloc_cprintf(write4, w4opaque, "mapped: %12zu\n", astats->mapped); -} - -static void -arena_stats_bprint(arena_t *arena, const malloc_bin_stats_t *bstats, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque) -{ - unsigned i, gap_start; - -#ifdef JEMALLOC_TCACHE - malloc_cprintf(write4, w4opaque, - "bins: bin size regs pgs requests " - "nfills nflushes newruns reruns maxruns curruns\n"); -#else - malloc_cprintf(write4, w4opaque, - "bins: bin size regs pgs requests " - "newruns reruns maxruns curruns\n"); -#endif - for (i = 0, gap_start = UINT_MAX; i < nbins; i++) { - if (bstats[i].nruns == 0) { - if (gap_start == UINT_MAX) - gap_start = i; - } else { - if (gap_start != UINT_MAX) { - if (i > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write4, w4opaque, - "[%u..%u]\n", gap_start, - i - 1); - } else { - /* Gap of one size class. */ - malloc_cprintf(write4, w4opaque, - "[%u]\n", gap_start); - } - gap_start = UINT_MAX; - } - malloc_cprintf(write4, w4opaque, - "%13u %1s %5u %4u %3u %9"PRIu64" %9"PRIu64 -#ifdef JEMALLOC_TCACHE - " %9"PRIu64" %9"PRIu64"" -#endif - " %9"PRIu64" %7zu %7zu\n", - i, - i < ntbins ? "T" : i < ntbins + nqbins ? - "Q" : i < ntbins + nqbins + ncbins ? "C" : - i < ntbins + nqbins + ncbins + nsbins ? 
"S" - : "M", - arena->bins[i].reg_size, - arena->bins[i].nregs, - arena->bins[i].run_size >> PAGE_SHIFT, - bstats[i].nrequests, -#ifdef JEMALLOC_TCACHE - bstats[i].nfills, - bstats[i].nflushes, -#endif - bstats[i].nruns, - bstats[i].reruns, - bstats[i].highruns, - bstats[i].curruns); - } - } - if (gap_start != UINT_MAX) { - if (i > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write4, w4opaque, "[%u..%u]\n", - gap_start, i - 1); - } else { - /* Gap of one size class. */ - malloc_cprintf(write4, w4opaque, "[%u]\n", gap_start); - } - } -} - -static void -arena_stats_lprint(const malloc_large_stats_t *lstats, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque) -{ - size_t i; - ssize_t gap_start; - size_t nlclasses = (chunksize - PAGE_SIZE) >> PAGE_SHIFT; - - malloc_cprintf(write4, w4opaque, - "large: size pages nrequests maxruns curruns\n"); - - for (i = 0, gap_start = -1; i < nlclasses; i++) { - if (lstats[i].nrequests == 0) { - if (gap_start == -1) - gap_start = i; - } else { - if (gap_start != -1) { - malloc_cprintf(write4, w4opaque, "[%zu]\n", - i - gap_start); - gap_start = -1; - } - malloc_cprintf(write4, w4opaque, - "%13zu %5zu %9"PRIu64" %9zu %9zu\n", - (i+1) << PAGE_SHIFT, i+1, - lstats[i].nrequests, - lstats[i].highruns, - lstats[i].curruns); - } - } - if (gap_start != -1) - malloc_cprintf(write4, w4opaque, "[%zu]\n", i - gap_start); -} - -void -arena_stats_mprint(arena_t *arena, size_t nactive, size_t ndirty, - const arena_stats_t *astats, const malloc_bin_stats_t *bstats, - const malloc_large_stats_t *lstats, bool bins, bool large, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque) -{ - - arena_stats_aprint(nactive, ndirty, astats, write4, w4opaque); - if (bins && astats->nmalloc_small + astats->nmalloc_medium > 0) - arena_stats_bprint(arena, bstats, write4, w4opaque); - if (large && astats->nmalloc_large > 0) - arena_stats_lprint(lstats, write4, w4opaque); -} - -void -arena_stats_print(arena_t *arena, bool bins, bool large, - void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque) -{ - size_t nactive, ndirty; - arena_stats_t astats; - malloc_bin_stats_t bstats[nbins]; - malloc_large_stats_t lstats[((chunksize - PAGE_SIZE) >> PAGE_SHIFT)]; - - nactive = 0; - ndirty = 0; - memset(&astats, 0, sizeof(astats)); - memset(bstats, 0, sizeof(bstats)); - memset(lstats, 0, sizeof(lstats)); - - arena_stats_merge(arena, &nactive, &ndirty, &astats, bstats, lstats); - arena_stats_mprint(arena, nactive, ndirty, &astats, bstats, lstats, - bins, large, write4, w4opaque); -} #endif void diff --git a/jemalloc/src/jemalloc_chunk.c b/jemalloc/src/jemalloc_chunk.c index c42151dc..deaef7ff 100644 --- a/jemalloc/src/jemalloc_chunk.c +++ b/jemalloc/src/jemalloc_chunk.c @@ -10,6 +10,7 @@ bool opt_overcommit = true; #endif #ifdef JEMALLOC_STATS +malloc_mutex_t chunks_mtx; chunk_stats_t stats_chunks; #endif @@ -64,11 +65,13 @@ chunk_alloc(size_t size, bool *zero) RETURN: #ifdef JEMALLOC_STATS if (ret != NULL) { + malloc_mutex_lock(&chunks_mtx); stats_chunks.nchunks += (size / chunksize); stats_chunks.curchunks += (size / chunksize); + if (stats_chunks.curchunks > stats_chunks.highchunks) + stats_chunks.highchunks = stats_chunks.curchunks; + malloc_mutex_unlock(&chunks_mtx); } - if (stats_chunks.curchunks > stats_chunks.highchunks) - stats_chunks.highchunks = stats_chunks.curchunks; #endif assert(CHUNK_ADDR2BASE(ret) == ret); @@ -85,7 +88,9 @@ 
chunk_dealloc(void *chunk, size_t size) assert((size & chunksize_mask) == 0); #ifdef JEMALLOC_STATS + malloc_mutex_lock(&chunks_mtx); stats_chunks.curchunks -= (size / chunksize); + malloc_mutex_unlock(&chunks_mtx); #endif #ifdef JEMALLOC_SWAP @@ -110,6 +115,8 @@ chunk_boot(void) chunk_npages = (chunksize >> PAGE_SHIFT); #ifdef JEMALLOC_STATS + if (malloc_mutex_init(&chunks_mtx)) + return (true); memset(&stats_chunks, 0, sizeof(chunk_stats_t)); #endif diff --git a/jemalloc/src/jemalloc_chunk_dss.c b/jemalloc/src/jemalloc_chunk_dss.c index 7c6cdd61..e38b876a 100644 --- a/jemalloc/src/jemalloc_chunk_dss.c +++ b/jemalloc/src/jemalloc_chunk_dss.c @@ -243,7 +243,7 @@ chunk_dealloc_dss(void *chunk, size_t size) goto RETURN; } - ret = true + ret = true; RETURN: malloc_mutex_unlock(&dss_mtx); return (ret); diff --git a/jemalloc/src/jemalloc_chunk_swap.c b/jemalloc/src/jemalloc_chunk_swap.c index 0a304714..fa5a5b14 100644 --- a/jemalloc/src/jemalloc_chunk_swap.c +++ b/jemalloc/src/jemalloc_chunk_swap.c @@ -6,12 +6,13 @@ malloc_mutex_t swap_mtx; bool swap_enabled; +bool swap_prezeroed; +size_t swap_nfds; +int *swap_fds; #ifdef JEMALLOC_STATS size_t swap_avail; #endif -static bool swap_prezeroed; - /* Base address of the mmap()ed file(s). */ static void *swap_base; /* Current end of the space in use (<= swap_max). */ @@ -318,12 +319,21 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed) swap_end = swap_base; swap_max = (void *)((uintptr_t)vaddr + cumsize); - swap_enabled = true; + /* Copy the fds array for mallctl purposes. */ + swap_fds = (int *)base_alloc(nfds * sizeof(int)); + if (swap_fds == NULL) { + ret = true; + goto RETURN; + } + memcpy(swap_fds, fds, nfds * sizeof(int)); + swap_nfds = nfds; #ifdef JEMALLOC_STATS swap_avail = cumsize; #endif + swap_enabled = true; + ret = false; RETURN: malloc_mutex_unlock(&swap_mtx); @@ -338,10 +348,12 @@ chunk_swap_boot(void) return (true); swap_enabled = false; + swap_prezeroed = false; /* swap.* mallctl's depend on this. */ + swap_nfds = 0; + swap_fds = NULL; #ifdef JEMALLOC_STATS swap_avail = 0; #endif - swap_prezeroed = false; swap_base = NULL; swap_end = NULL; swap_max = NULL; diff --git a/jemalloc/src/jemalloc_ctl.c b/jemalloc/src/jemalloc_ctl.c new file mode 100644 index 00000000..bbf007f2 --- /dev/null +++ b/jemalloc/src/jemalloc_ctl.c @@ -0,0 +1,1291 @@ +#define JEMALLOC_CTL_C_ +#include "internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static uint64_t ctl_epoch; +static ctl_stats_t ctl_stats; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +#define CTL_PROTO(n) \ +static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \ + size_t i); + +#ifdef JEMALLOC_STATS +static bool ctl_arena_init(ctl_arena_stats_t *astats); +#endif +static void ctl_arena_clear(ctl_arena_stats_t *astats); +static void ctl_arena_refresh(arena_t *arena, unsigned i); +static void ctl_refresh(void); +static bool ctl_init(void); +static int ctl_lookup(const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp); + +CTL_PROTO(epoch) +#ifdef JEMALLOC_TCACHE +CTL_PROTO(tcache_flush) +#endif +CTL_PROTO(config_debug) +CTL_PROTO(config_dss) +CTL_PROTO(config_dynamic_page_shift) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_stats) +CTL_PROTO(config_swap) +CTL_PROTO(config_sysv) +CTL_PROTO(config_tcache) +CTL_PROTO(config_tiny) +CTL_PROTO(config_tls) +CTL_PROTO(config_trace) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +#ifdef JEMALLOC_FILL +CTL_PROTO(opt_junk) +#endif +#ifdef JEMALLOC_SYSV +CTL_PROTO(opt_sysv) +#endif +#ifdef JEMALLOC_XMALLOC +CTL_PROTO(opt_xmalloc) +#endif +#ifdef JEMALLOC_ZERO +CTL_PROTO(opt_zero) +#endif +#ifdef JEMALLOC_TCACHE +CTL_PROTO(opt_lg_tcache_nslots) +CTL_PROTO(opt_lg_tcache_gc_sweep) +CTL_PROTO(opt_tcache_sort) +#endif +CTL_PROTO(opt_stats_print) +#ifdef JEMALLOC_TRACE +CTL_PROTO(opt_trace) +#endif +CTL_PROTO(opt_lg_qspace_max) +CTL_PROTO(opt_lg_cspace_max) +CTL_PROTO(opt_lg_medium_max) +CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_lg_chunk) +#ifdef JEMALLOC_SWAP +CTL_PROTO(opt_overcommit) +#endif +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_run_size) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lrun_i_size) +INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_cacheline) +CTL_PROTO(arenas_subpage) +CTL_PROTO(arenas_pagesize) +CTL_PROTO(arenas_medium) +CTL_PROTO(arenas_chunksize) +#ifdef JEMALLOC_TINY +CTL_PROTO(arenas_tspace_min) +CTL_PROTO(arenas_tspace_max) +#endif +CTL_PROTO(arenas_qspace_min) +CTL_PROTO(arenas_qspace_max) +CTL_PROTO(arenas_cspace_min) +CTL_PROTO(arenas_cspace_max) +CTL_PROTO(arenas_sspace_min) +CTL_PROTO(arenas_sspace_max) +CTL_PROTO(arenas_medium_min) +CTL_PROTO(arenas_medium_max) +CTL_PROTO(arenas_ntbins) +CTL_PROTO(arenas_nqbins) +CTL_PROTO(arenas_ncbins) +CTL_PROTO(arenas_nsbins) +CTL_PROTO(arenas_nmbins) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nlruns) +#ifdef JEMALLOC_STATS +CTL_PROTO(stats_chunks_current) +CTL_PROTO(stats_chunks_total) +CTL_PROTO(stats_chunks_high) +CTL_PROTO(stats_huge_allocated) +CTL_PROTO(stats_huge_nmalloc) +CTL_PROTO(stats_huge_ndalloc) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_medium_allocated) +CTL_PROTO(stats_arenas_i_medium_nmalloc) +CTL_PROTO(stats_arenas_i_medium_ndalloc) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +#ifdef JEMALLOC_TCACHE +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +#endif +CTL_PROTO(stats_arenas_i_bins_j_nruns) +CTL_PROTO(stats_arenas_i_bins_j_nreruns) +CTL_PROTO(stats_arenas_i_bins_j_highruns) +CTL_PROTO(stats_arenas_i_bins_j_curruns) +INDEX_PROTO(stats_arenas_i_bins_j) 
+CTL_PROTO(stats_arenas_i_lruns_j_nrequests) +CTL_PROTO(stats_arenas_i_lruns_j_highruns) +CTL_PROTO(stats_arenas_i_lruns_j_curruns) +INDEX_PROTO(stats_arenas_i_lruns_j) +#endif +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +#ifdef JEMALLOC_STATS +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_npurge) +CTL_PROTO(stats_arenas_i_nmadvise) +CTL_PROTO(stats_arenas_i_purged) +#endif +INDEX_PROTO(stats_arenas_i) +#ifdef JEMALLOC_STATS +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_mapped) +#endif +#ifdef JEMALLOC_SWAP +# ifdef JEMALLOC_STATS +CTL_PROTO(swap_avail) +# endif +CTL_PROTO(swap_prezeroed) +CTL_PROTO(swap_nfds) +CTL_PROTO(swap_fds) +#endif + +/******************************************************************************/ +/* mallctl tree. */ + +/* Maximum tree depth. */ +#define CTL_MAX_DEPTH 6 + +#define NAME(n) true, {.named = {n +#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL +#define CTL(c) 0, NULL}}, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. + */ +#define INDEX(i) false, {.indexed = {i##_index}}, NULL + +#ifdef JEMALLOC_TCACHE +static const ctl_node_t tcache_node[] = { + {NAME("flush"), CTL(tcache_flush)} +}; +#endif + +static const ctl_node_t config_node[] = { + {NAME("debug"), CTL(config_debug)}, + {NAME("dss"), CTL(config_dss)}, + {NAME("dynamic_page_shift"), CTL(config_dynamic_page_shift)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("swap"), CTL(config_swap)}, + {NAME("sysv"), CTL(config_sysv)}, + {NAME("tcache"), CTL(config_tcache)}, + {NAME("tiny"), CTL(config_tiny)}, + {NAME("tls"), CTL(config_tls)}, + {NAME("trace"), CTL(config_trace)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, +#ifdef JEMALLOC_FILL + {NAME("junk"), CTL(opt_junk)}, +#endif +#ifdef JEMALLOC_SYSV + {NAME("sysv"), CTL(opt_sysv)}, +#endif +#ifdef JEMALLOC_XMALLOC + {NAME("xmalloc"), CTL(opt_xmalloc)}, +#endif +#ifdef JEMALLOC_ZERO + {NAME("zero"), CTL(opt_zero)}, +#endif +#ifdef JEMALLOC_TCACHE + {NAME("lg_tcache_nslots"), CTL(opt_lg_tcache_nslots)}, + {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)}, + {NAME("tcache_sort"), CTL(opt_tcache_sort)}, +#endif + {NAME("stats_print"), CTL(opt_stats_print)}, +#ifdef JEMALLOC_TRACE + {NAME("trace"), CTL(opt_trace)}, +#endif + {NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)}, + {NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)}, + {NAME("lg_medium_max"), CTL(opt_lg_medium_max)}, + {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("lg_chunk"), CTL(opt_lg_chunk)} +#ifdef JEMALLOC_SWAP + , + {NAME("overcommit"), CTL(opt_overcommit)} +#endif +}; + +static const ctl_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("run_size"), CTL(arenas_bin_i_run_size)} +}; +static const ctl_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(arenas_bin_i)} +}; + +static const ctl_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_node_t arenas_lrun_i_node[] = { + {NAME("size"), CTL(arenas_lrun_i_size)} +}; +static const ctl_node_t super_arenas_lrun_i_node[] = { + {NAME(""), CHILD(arenas_lrun_i)} +}; + +static const ctl_node_t arenas_lrun_node[] = { + {INDEX(arenas_lrun_i)} +}; + +static const ctl_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + 
{NAME("initialized"), CTL(arenas_initialized)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("cacheline"), CTL(arenas_cacheline)}, + {NAME("subpage"), CTL(arenas_subpage)}, + {NAME("pagesize"), CTL(arenas_pagesize)}, + {NAME("medium"), CTL(arenas_medium)}, + {NAME("chunksize"), CTL(arenas_chunksize)}, +#ifdef JEMALLOC_TINY + {NAME("tspace_min"), CTL(arenas_tspace_min)}, + {NAME("tspace_max"), CTL(arenas_tspace_max)}, +#endif + {NAME("qspace_min"), CTL(arenas_qspace_min)}, + {NAME("qspace_max"), CTL(arenas_qspace_max)}, + {NAME("cspace_min"), CTL(arenas_cspace_min)}, + {NAME("cspace_max"), CTL(arenas_cspace_max)}, + {NAME("sspace_min"), CTL(arenas_sspace_min)}, + {NAME("sspace_max"), CTL(arenas_sspace_max)}, + {NAME("medium_min"), CTL(arenas_medium_min)}, + {NAME("medium_max"), CTL(arenas_medium_max)}, + {NAME("ntbins"), CTL(arenas_ntbins)}, + {NAME("nqbins"), CTL(arenas_nqbins)}, + {NAME("ncbins"), CTL(arenas_ncbins)}, + {NAME("nsbins"), CTL(arenas_nsbins)}, + {NAME("nmbins"), CTL(arenas_nmbins)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("bin"), CHILD(arenas_bin)}, + {NAME("nlruns"), CTL(arenas_nlruns)}, + {NAME("lrun"), CHILD(arenas_lrun)} +}; + +#ifdef JEMALLOC_STATS +static const ctl_node_t stats_chunks_node[] = { + {NAME("current"), CTL(stats_chunks_current)}, + {NAME("total"), CTL(stats_chunks_total)}, + {NAME("high"), CTL(stats_chunks_high)} +}; + +static const ctl_node_t stats_huge_node[] = { + {NAME("allocated"), CTL(stats_huge_allocated)}, + {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, + {NAME("ndalloc"), CTL(stats_huge_ndalloc)} +}; + +static const ctl_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)} +}; + +static const ctl_node_t stats_arenas_i_medium_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_medium_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_medium_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_medium_ndalloc)} +}; + +static const ctl_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)} +}; + +static const ctl_node_t stats_arenas_i_bins_j_node[] = { + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, +#ifdef JEMALLOC_TCACHE + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, +#endif + {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, + {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, + {NAME("highruns"), CTL(stats_arenas_i_bins_j_highruns)}, + {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} +}; +static const ctl_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(stats_arenas_i_bins_j)} +}; + +static const ctl_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_node_t stats_arenas_i_lruns_j_node[] = { + {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, + {NAME("highruns"), CTL(stats_arenas_i_lruns_j_highruns)}, + {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +}; +static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = { + {NAME(""), CHILD(stats_arenas_i_lruns_j)} +}; + +static const ctl_node_t stats_arenas_i_lruns_node[] = { + {INDEX(stats_arenas_i_lruns_j)} +}; +#endif + +static const ctl_node_t stats_arenas_i_node[] = { + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + 
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)} +#ifdef JEMALLOC_STATS + , + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("npurge"), CTL(stats_arenas_i_npurge)}, + {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, + {NAME("purged"), CTL(stats_arenas_i_purged)}, + {NAME("small"), CHILD(stats_arenas_i_small)}, + {NAME("medium"), CHILD(stats_arenas_i_medium)}, + {NAME("large"), CHILD(stats_arenas_i_large)}, + {NAME("bins"), CHILD(stats_arenas_i_bins)}, + {NAME("lruns"), CHILD(stats_arenas_i_lruns)} +#endif +}; +static const ctl_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(stats_arenas_i)} +}; + +static const ctl_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_node_t stats_node[] = { +#ifdef JEMALLOC_STATS + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("chunks"), CHILD(stats_chunks)}, + {NAME("huge"), CHILD(stats_huge)}, +#endif + {NAME("arenas"), CHILD(stats_arenas)} +}; + +#ifdef JEMALLOC_SWAP +static const ctl_node_t swap_node[] = { +# ifdef JEMALLOC_STATS + {NAME("avail"), CTL(swap_avail)}, +# endif + {NAME("prezeroed"), CTL(swap_prezeroed)}, + {NAME("nfds"), CTL(swap_nfds)}, + {NAME("fds"), CTL(swap_fds)} +}; +#endif + +static const ctl_node_t root_node[] = { + {NAME("epoch"), CTL(epoch)}, +#ifdef JEMALLOC_TCACHE + {NAME("tcache"), CHILD(tcache)}, +#endif + {NAME("config"), CHILD(config)}, + {NAME("opt"), CHILD(opt)}, + {NAME("arenas"), CHILD(arenas)}, + {NAME("stats"), CHILD(stats)} +#ifdef JEMALLOC_SWAP + , + {NAME("swap"), CHILD(swap)} +#endif +}; +static const ctl_node_t super_root_node[] = { + {NAME(""), CHILD(root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + +#ifdef JEMALLOC_STATS +static bool +ctl_arena_init(ctl_arena_stats_t *astats) +{ + + if (astats->bstats == NULL) { + astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins * + sizeof(malloc_bin_stats_t)); + if (astats->bstats == NULL) + return (true); + } + if (astats->lstats == NULL) { + astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (astats->lstats == NULL) + return (true); + } + + return (false); +} +#endif + +static void +ctl_arena_clear(ctl_arena_stats_t *astats) +{ + + astats->pactive = 0; + astats->pdirty = 0; +#ifdef JEMALLOC_STATS + memset(&astats->astats, 0, sizeof(arena_stats_t)); + memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t)); + memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t)); +#endif +} + +static void +ctl_arena_refresh(arena_t *arena, unsigned i) +{ + ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; + ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas]; + + ctl_arena_clear(astats); + + malloc_mutex_lock(&arena->lock); +#ifdef JEMALLOC_STATS + arena_stats_merge(arena, &astats->pactive, &astats->pdirty, + &astats->astats, astats->bstats, astats->lstats); + /* Merge into sum stats as well. */ + arena_stats_merge(arena, &sstats->pactive, &sstats->pdirty, + &sstats->astats, sstats->bstats, sstats->lstats); +#else + astats->pactive += arena->nactive; + astats->pdirty += arena->ndirty; + /* Merge into sum stats as well. 
+	 */
+	sstats->pactive += arena->nactive;
+	sstats->pdirty += arena->ndirty;
+#endif
+	malloc_mutex_unlock(&arena->lock);
+}
+
+static void
+ctl_refresh(void)
+{
+	unsigned i;
+	arena_t *tarenas[narenas];
+
+#ifdef JEMALLOC_STATS
+	malloc_mutex_lock(&chunks_mtx);
+	ctl_stats.chunks.current = stats_chunks.curchunks;
+	ctl_stats.chunks.total = stats_chunks.nchunks;
+	ctl_stats.chunks.high = stats_chunks.highchunks;
+	malloc_mutex_unlock(&chunks_mtx);
+
+	malloc_mutex_lock(&huge_mtx);
+	ctl_stats.huge.allocated = huge_allocated;
+	ctl_stats.huge.nmalloc = huge_nmalloc;
+	ctl_stats.huge.ndalloc = huge_ndalloc;
+	malloc_mutex_unlock(&huge_mtx);
+#endif
+
+	/*
+	 * Clear sum stats, since they will be merged into by
+	 * ctl_arena_refresh().
+	 */
+	ctl_arena_clear(&ctl_stats.arenas[narenas]);
+
+	malloc_mutex_lock(&arenas_lock);
+	memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+	malloc_mutex_unlock(&arenas_lock);
+	for (i = 0; i < narenas; i++) {
+		bool initialized = (tarenas[i] != NULL);
+
+		ctl_stats.arenas[i].initialized = initialized;
+		if (initialized)
+			ctl_arena_refresh(tarenas[i], i);
+	}
+
+#ifdef JEMALLOC_STATS
+	ctl_stats.allocated = ctl_stats.arenas[narenas].astats.allocated_small
+	    + ctl_stats.arenas[narenas].astats.allocated_medium
+	    + ctl_stats.arenas[narenas].astats.allocated_large
+	    + ctl_stats.huge.allocated;
+	ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT);
+	ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+
+# ifdef JEMALLOC_SWAP
+	malloc_mutex_lock(&swap_mtx);
+	ctl_stats.swap_avail = swap_avail;
+	malloc_mutex_unlock(&swap_mtx);
+# endif
+#endif
+
+	ctl_epoch++;
+}
+
+static bool
+ctl_init(void)
+{
+
+	if (ctl_initialized == false) {
+#ifdef JEMALLOC_STATS
+		unsigned i;
+#endif
+
+		/*
+		 * Allocate space for one extra arena stats element, which
+		 * contains summed stats across all arenas.
+		 */
+		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
+		    (narenas + 1) * sizeof(ctl_arena_stats_t));
+		if (ctl_stats.arenas == NULL)
+			return (true);
+		memset(ctl_stats.arenas, 0, (narenas + 1) *
+		    sizeof(ctl_arena_stats_t));
+
+		/*
+		 * Initialize all stats structures, regardless of whether they
+		 * ever get used.  Lazy initialization would allow errors to
+		 * cause inconsistent state to be viewable by the application.
+		 */
+#ifdef JEMALLOC_STATS
+		for (i = 0; i <= narenas; i++) {
+			if (ctl_arena_init(&ctl_stats.arenas[i]))
+				return (true);
+		}
+#endif
+		ctl_stats.arenas[narenas].initialized = true;
+
+		ctl_epoch = 0;
+		ctl_refresh();
+		ctl_initialized = true;
+	}
+
+	return (false);
+}
+
+static int
+ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
+    size_t *depthp)
+{
+	int ret;
+	const char *elm, *tdot, *dot;
+	size_t elen, i, j;
+	const ctl_node_t *node;
+
+	elm = name;
+	/* Equivalent to strchrnul(). */
+	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
+	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+	if (elen == 0) {
+		ret = ENOENT;
+		goto RETURN;
+	}
+	node = super_root_node;
+	for (i = 0; i < *depthp; i++) {
+		assert(node->named);
+		assert(node->u.named.nchildren > 0);
+		if (node->u.named.children[0].named) {
+			const ctl_node_t *pnode = node;
+
+			/* Children are named.
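+			 * Scan them for an exact match on the current name
+			 * component.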
*/ + for (j = 0; j < node->u.named.nchildren; j++) { + const ctl_node_t *child = + &node->u.named.children[j]; + if (strlen(child->u.named.name) == elen + && strncmp(elm, child->u.named.name, + elen) == 0) { + node = child; + if (nodesp != NULL) + nodesp[i] = node; + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto RETURN; + } + } else { + unsigned long index; + const ctl_node_t *inode; + + /* Children are indexed. */ + index = strtoul(elm, NULL, 10); + if (index == ULONG_MAX) { + ret = ENOENT; + goto RETURN; + } + + inode = &node->u.named.children[0]; + node = inode->u.indexed.index(mibp, *depthp, + index); + if (node == NULL) { + ret = ENOENT; + goto RETURN; + } + + if (nodesp != NULL) + nodesp[i] = node; + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto RETURN; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto RETURN; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +RETURN: + return (ret); +} + +int +ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_init()) { + ret = EAGAIN; + goto RETURN; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(name, nodes, mib, &depth); + if (ret != 0) + goto RETURN; + + if (nodes[depth-1]->ctl == NULL) { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + goto RETURN; + } + ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen); + +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return(ret); +} + +int +ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + int ret; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_init()) { + ret = EAGAIN; + goto RETURN; + } + + ret = ctl_lookup(name, NULL, mibp, miblenp); + +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return(ret); +} + +int +ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const ctl_node_t *node; + size_t i; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_init()) { + ret = EAGAIN; + goto RETURN; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + if (node->u.named.children[0].named) { + /* Children are named. */ + if (node->u.named.nchildren <= mib[i]) { + ret = ENOENT; + goto RETURN; + } + node = &node->u.named.children[mib[i]]; + } else { + const ctl_node_t *inode; + + /* Indexed element. */ + inode = &node->u.named.children[0]; + node = inode->u.indexed.index(mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto RETURN; + } + } + } + + /* Call the ctl function. */ + if (node->ctl == NULL) { + /* Partial MIB. */ + ret = ENOENT; + goto RETURN; + } + ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + +RETURN: + malloc_mutex_unlock(&ctl_mtx); + return(ret); +} + +bool +ctl_boot(void) +{ + + if (malloc_mutex_init(&ctl_mtx)) + return (true); + + ctl_initialized = false; + + return (false); +} + +/******************************************************************************/ +/* *_ctl() functions. 
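+ *
+ * Each leaf handler follows the sysctl(3)-style old/new protocol: the current
+ * value is optionally copied out through oldp/oldlenp, and a new value is
+ * optionally read in through newp/newlen.  The READONLY(), VOID(), READ(),
+ * and WRITE() macros below factor out that boilerplate.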
*/ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto RETURN; \ + } \ +} while (0) + +#define VOID() do { \ + READONLY(); \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto RETURN; \ + } \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&v, copylen); \ + ret = EINVAL; \ + goto RETURN; \ + } else \ + *(t *)oldp = v; \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto RETURN; \ + } \ + v = *(t *)newp; \ + } \ +} while (0) + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +RETURN: \ + return (ret); \ +} + +#define CTL_RO_TRUE_GEN(n) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + bool oldval; \ + \ + READONLY(); \ + oldval = true; \ + READ(oldval, bool); \ + \ + ret = 0; \ +RETURN: \ + return (ret); \ +} + +#define CTL_RO_FALSE_GEN(n) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + bool oldval; \ + \ + READONLY(); \ + oldval = false; \ + READ(oldval, bool); \ + \ + ret = 0; \ +RETURN: \ + return (ret); \ +} + +static int +epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + uint64_t newval; + + newval = 0; + WRITE(newval, uint64_t); + if (newval != 0) + ctl_refresh(); + READ(ctl_epoch, uint64_t); + + ret = 0; +RETURN: + return (ret); +} + +#ifdef JEMALLOC_TCACHE +static int +tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + tcache_t *tcache; + + VOID(); + + tcache = tcache_tls; + if (tcache == NULL) { + ret = 0; + goto RETURN; + } + tcache_destroy(tcache); + tcache_tls = NULL; + + ret = 0; +RETURN: + return (ret); +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_DEBUG +CTL_RO_TRUE_GEN(config_debug) +#else +CTL_RO_FALSE_GEN(config_debug) +#endif + +#ifdef JEMALLOC_DSS +CTL_RO_TRUE_GEN(config_dss) +#else +CTL_RO_FALSE_GEN(config_dss) +#endif + +#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT +CTL_RO_TRUE_GEN(config_dynamic_page_shift) +#else +CTL_RO_FALSE_GEN(config_dynamic_page_shift) +#endif + +#ifdef JEMALLOC_FILL +CTL_RO_TRUE_GEN(config_fill) +#else +CTL_RO_FALSE_GEN(config_fill) +#endif + +#ifdef JEMALLOC_LAZY_LOCK +CTL_RO_TRUE_GEN(config_lazy_lock) +#else +CTL_RO_FALSE_GEN(config_lazy_lock) +#endif + +#ifdef JEMALLOC_STATS +CTL_RO_TRUE_GEN(config_stats) +#else +CTL_RO_FALSE_GEN(config_stats) +#endif + +#ifdef JEMALLOC_SWAP +CTL_RO_TRUE_GEN(config_swap) +#else +CTL_RO_FALSE_GEN(config_swap) +#endif + +#ifdef JEMALLOC_SYSV +CTL_RO_TRUE_GEN(config_sysv) +#else +CTL_RO_FALSE_GEN(config_sysv) +#endif + +#ifdef JEMALLOC_TCACHE +CTL_RO_TRUE_GEN(config_tcache) +#else +CTL_RO_FALSE_GEN(config_tcache) +#endif + +#ifdef JEMALLOC_TINY +CTL_RO_TRUE_GEN(config_tiny) +#else +CTL_RO_FALSE_GEN(config_tiny) +#endif + +#ifdef JEMALLOC_TLS +CTL_RO_TRUE_GEN(config_tls) +#else +CTL_RO_FALSE_GEN(config_tls) +#endif + +#ifdef 
JEMALLOC_TRACE +CTL_RO_TRUE_GEN(config_trace) +#else +CTL_RO_FALSE_GEN(config_trace) +#endif + +#ifdef JEMALLOC_XMALLOC +CTL_RO_TRUE_GEN(config_xmalloc) +#else +CTL_RO_FALSE_GEN(config_xmalloc) +#endif + +/******************************************************************************/ + +CTL_RO_GEN(opt_abort, opt_abort, bool) +#ifdef JEMALLOC_FILL +CTL_RO_GEN(opt_junk, opt_junk, bool) +#endif +#ifdef JEMALLOC_SYSV +CTL_RO_GEN(opt_sysv, opt_sysv, bool) +#endif +#ifdef JEMALLOC_XMALLOC +CTL_RO_GEN(opt_xmalloc, opt_xmalloc, bool) +#endif +#ifdef JEMALLOC_ZERO +CTL_RO_GEN(opt_zero, opt_zero, bool) +#endif +#ifdef JEMALLOC_TCACHE +CTL_RO_GEN(opt_lg_tcache_nslots, opt_lg_tcache_nslots, size_t) +CTL_RO_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t) +CTL_RO_GEN(opt_tcache_sort, opt_tcache_sort, bool) +#endif +CTL_RO_GEN(opt_stats_print, opt_stats_print, bool) +#ifdef JEMALLOC_TRACE +CTL_RO_GEN(opt_trace, opt_trace, bool) +#endif +CTL_RO_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t) +CTL_RO_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t) +CTL_RO_GEN(opt_lg_medium_max, opt_lg_medium_max, size_t) +CTL_RO_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_GEN(opt_lg_chunk, opt_lg_chunk, size_t) +#ifdef JEMALLOC_SWAP +CTL_RO_GEN(opt_overcommit, opt_overcommit, bool) +#endif + +/******************************************************************************/ + +CTL_RO_GEN(arenas_bin_i_size, arenas[0]->bins[mib[2]].reg_size, size_t) +CTL_RO_GEN(arenas_bin_i_nregs, arenas[0]->bins[mib[2]].nregs, uint32_t) +CTL_RO_GEN(arenas_bin_i_run_size, arenas[0]->bins[mib[2]].run_size, size_t) +const ctl_node_t * +arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nbins) + return (NULL); + return (super_arenas_bin_i_node); +} + +CTL_RO_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t) +const ctl_node_t * +arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nlclasses) + return (NULL); + return (super_arenas_lrun_i_node); +} + +CTL_RO_GEN(arenas_narenas, narenas, unsigned) + +static int +arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned nread, i; + + READONLY(); + if (*oldlenp != narenas * sizeof(bool)) { + ret = EINVAL; + nread = (*oldlenp < narenas * sizeof(bool)) + ? 
(*oldlenp / sizeof(bool)) : narenas; + } else { + ret = 0; + nread = narenas; + } + + for (i = 0; i < nread; i++) + ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; + +RETURN: + return (ret); +} + +CTL_RO_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_GEN(arenas_cacheline, CACHELINE, size_t) +CTL_RO_GEN(arenas_subpage, SUBPAGE, size_t) +CTL_RO_GEN(arenas_pagesize, PAGE_SIZE, size_t) +CTL_RO_GEN(arenas_medium, (1U << lg_mspace), size_t) +CTL_RO_GEN(arenas_chunksize, chunksize, size_t) +#ifdef JEMALLOC_TINY +CTL_RO_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t) +CTL_RO_GEN(arenas_tspace_max, (qspace_min >> 1), size_t) +#endif +CTL_RO_GEN(arenas_qspace_min, qspace_min, size_t) +CTL_RO_GEN(arenas_qspace_max, qspace_max, size_t) +CTL_RO_GEN(arenas_cspace_min, cspace_min, size_t) +CTL_RO_GEN(arenas_cspace_max, cspace_max, size_t) +CTL_RO_GEN(arenas_sspace_min, sspace_min, size_t) +CTL_RO_GEN(arenas_sspace_max, sspace_max, size_t) +CTL_RO_GEN(arenas_medium_min, medium_min, size_t) +CTL_RO_GEN(arenas_medium_max, medium_max, size_t) +CTL_RO_GEN(arenas_ntbins, ntbins, unsigned) +CTL_RO_GEN(arenas_nqbins, nqbins, unsigned) +CTL_RO_GEN(arenas_ncbins, ncbins, unsigned) +CTL_RO_GEN(arenas_nsbins, nsbins, unsigned) +CTL_RO_GEN(arenas_nmbins, nmbins, unsigned) +CTL_RO_GEN(arenas_nbins, nbins, unsigned) +CTL_RO_GEN(arenas_nlruns, nlclasses, size_t) + +/******************************************************************************/ + +#ifdef JEMALLOC_STATS +CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t) +CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t) +CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t) +CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t) +CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t) +CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t) +CTL_RO_GEN(stats_arenas_i_small_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_small, size_t) +CTL_RO_GEN(stats_arenas_i_small_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_small, uint64_t) +CTL_RO_GEN(stats_arenas_i_small_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_small, uint64_t) +CTL_RO_GEN(stats_arenas_i_medium_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_medium, size_t) +CTL_RO_GEN(stats_arenas_i_medium_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_medium, uint64_t) +CTL_RO_GEN(stats_arenas_i_medium_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_medium, uint64_t) +CTL_RO_GEN(stats_arenas_i_large_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) +CTL_RO_GEN(stats_arenas_i_large_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) +CTL_RO_GEN(stats_arenas_i_large_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) + +CTL_RO_GEN(stats_arenas_i_bins_j_nrequests, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) +#ifdef JEMALLOC_TCACHE +CTL_RO_GEN(stats_arenas_i_bins_j_nfills, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_nflushes, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) +#endif +CTL_RO_GEN(stats_arenas_i_bins_j_nruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_nreruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) +CTL_RO_GEN(stats_arenas_i_bins_j_highruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t) +CTL_RO_GEN(stats_arenas_i_bins_j_curruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + +const ctl_node_t * 
+stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+	if (j > nbins)
+		return (NULL);
+	return (super_stats_arenas_i_bins_j_node);
+}
+
+CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
+CTL_RO_GEN(stats_arenas_i_lruns_j_curruns,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
+CTL_RO_GEN(stats_arenas_i_lruns_j_highruns,
+    ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t)
+
+const ctl_node_t *
+stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
+{
+
+	if (j > nlclasses)
+		return (NULL);
+	return (super_stats_arenas_i_lruns_j_node);
+}
+
+#endif
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+#ifdef JEMALLOC_STATS
+CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped,
+    size_t)
+CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge,
+    uint64_t)
+CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise,
+    uint64_t)
+CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
+    uint64_t)
+#endif
+
+const ctl_node_t *
+stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+	if (ctl_stats.arenas[i].initialized == false)
+		return (NULL);
+	return (super_stats_arenas_i_node);
+}
+
+#ifdef JEMALLOC_STATS
+CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
+CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
+#endif
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_SWAP
+# ifdef JEMALLOC_STATS
+CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t)
+# endif
+
+static int
+swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+
+	if (swap_enabled) {
+		READONLY();
+	} else {
+		/*
+		 * swap_prezeroed isn't actually used by the swap code until it
+		 * is set during a successful chunk_swap_enable() call.  We use
+		 * it here to store the value that we'll pass to
+		 * chunk_swap_enable() in a swap.fds mallctl().  This is not
+		 * very clean, but the obvious alternatives are even worse.
+		 */
+		WRITE(swap_prezeroed, bool);
+	}
+
+	READ(swap_prezeroed, bool);
+
+	ret = 0;
+RETURN:
+	return (ret);
+}
+
+CTL_RO_GEN(swap_nfds, swap_nfds, size_t)
+
+static int
+swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+    void *newp, size_t newlen)
+{
+	int ret;
+
+	if (swap_enabled) {
+		READONLY();
+	} else if (newp != NULL) {
+		size_t nfds = newlen / sizeof(int);
+
+		{
+			int fds[nfds];
+
+			memcpy(fds, newp, nfds * sizeof(int));
+			if (chunk_swap_enable(fds, nfds, swap_prezeroed)) {
+				ret = EFAULT;
+				goto RETURN;
+			}
+		}
+	}
+
+	if (oldp != NULL && oldlenp != NULL) {
+		if (*oldlenp != swap_nfds * sizeof(int)) {
+			size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp)
+			    ?
swap_nfds * sizeof(int) : *oldlenp; + + memcpy(oldp, swap_fds, copylen); + ret = EINVAL; + goto RETURN; + } else + memcpy(oldp, swap_fds, *oldlenp); + } + + ret = 0; +RETURN: + return (ret); +} +#endif diff --git a/jemalloc/src/jemalloc_stats.c b/jemalloc/src/jemalloc_stats.c index d1c4ab5d..3125dd53 100644 --- a/jemalloc/src/jemalloc_stats.c +++ b/jemalloc/src/jemalloc_stats.c @@ -1,6 +1,39 @@ #define JEMALLOC_STATS_C_ #include "internal/jemalloc_internal.h" +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_I_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = i; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_J_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = j; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_IJ_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = i; \ + mib[4] = j; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + /******************************************************************************/ /* Data. */ @@ -9,9 +42,17 @@ bool opt_stats_print = false; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -static void -malloc_vcprintf(void (*write4)(void *, const char *, const char *, const char *, - const char *), void *w4opaque, const char *format, va_list ap); +#ifdef JEMALLOC_STATS +static void malloc_vcprintf(void (*write4)(void *, const char *, + const char *, const char *, const char *), void *w4opaque, + const char *format, va_list ap); +static void stats_arena_bins_print(void (*write4)(void *, const char *, + const char *, const char *, const char *), void *w4opaque, unsigned i); +static void stats_arena_lruns_print(void (*write4)(void *, const char *, + const char *, const char *, const char *), void *w4opaque, unsigned i); +static void stats_arena_print(void (*write4)(void *, const char *, + const char *, const char *, const char *), void *w4opaque, unsigned i); +#endif /******************************************************************************/ @@ -106,10 +147,221 @@ malloc_printf(const char *format, ...) 
 }
 #endif
+
+#ifdef JEMALLOC_STATS
+static void
+stats_arena_bins_print(void (*write4)(void *, const char *, const char *,
+    const char *, const char *), void *w4opaque, unsigned i)
+{
+	size_t pagesize;
+	bool config_tcache;
+	unsigned nbins, j, gap_start;
+
+	CTL_GET("arenas.pagesize", &pagesize, size_t);
+
+	CTL_GET("config.tcache", &config_tcache, bool);
+	if (config_tcache) {
+		malloc_cprintf(write4, w4opaque,
+		    "bins: bin size regs pgs nrequests "
+		    "nfills nflushes newruns reruns maxruns curruns\n");
+	} else {
+		malloc_cprintf(write4, w4opaque,
+		    "bins: bin size regs pgs nrequests "
+		    "newruns reruns maxruns curruns\n");
+	}
+	CTL_GET("arenas.nbins", &nbins, unsigned);
+	for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
+		uint64_t nruns;
+
+		CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
+		if (nruns == 0) {
+			if (gap_start == UINT_MAX)
+				gap_start = j;
+		} else {
+			unsigned ntbins_, nqbins, ncbins, nsbins;
+			size_t reg_size, run_size;
+			uint32_t nregs;
+			uint64_t nrequests, nfills, nflushes, reruns;
+			size_t highruns, curruns;
+
+			if (gap_start != UINT_MAX) {
+				if (j > gap_start + 1) {
+					/* Gap of more than one size class. */
+					malloc_cprintf(write4, w4opaque,
+					    "[%u..%u]\n", gap_start,
+					    j - 1);
+				} else {
+					/* Gap of one size class. */
+					malloc_cprintf(write4, w4opaque,
+					    "[%u]\n", gap_start);
+				}
+				gap_start = UINT_MAX;
+			}
+			CTL_GET("arenas.ntbins", &ntbins_, unsigned);
+			CTL_GET("arenas.nqbins", &nqbins, unsigned);
+			CTL_GET("arenas.ncbins", &ncbins, unsigned);
+			CTL_GET("arenas.nsbins", &nsbins, unsigned);
+			CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
+			CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
+			CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
+			    &nrequests, uint64_t);
+			if (config_tcache) {
+				CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
+				    &nfills, uint64_t);
+				CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
+				    &nflushes, uint64_t);
+			}
+			CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
+			    uint64_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
+			    size_t);
+			CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
+			    size_t);
+			if (config_tcache) {
+				malloc_cprintf(write4, w4opaque,
+				    "%13u %1s %5zu %4u %3zu %10"PRIu64" %9"PRIu64
+				    " %9"PRIu64" %9"PRIu64""
+				    " %9"PRIu64" %7zu %7zu\n",
+				    j,
+				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
+				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
+				    j < ntbins_ + nqbins + ncbins + nsbins ? "S"
+				    : "M",
+				    reg_size, nregs, run_size / pagesize,
+				    nrequests, nfills, nflushes, nruns, reruns,
+				    highruns, curruns);
+			} else {
+				malloc_cprintf(write4, w4opaque,
+				    "%13u %1s %5zu %4u %3zu %10"PRIu64" %9"PRIu64
+				    " %9"PRIu64" %7zu %7zu\n",
+				    j,
+				    j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
+				    "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
+				    j < ntbins_ + nqbins + ncbins + nsbins ? "S"
+				    : "M",
+				    reg_size, nregs, run_size / pagesize,
+				    nrequests, nruns, reruns, highruns,
+				    curruns);
+			}
+		}
+	}
+	if (gap_start != UINT_MAX) {
+		if (j > gap_start + 1) {
+			/* Gap of more than one size class. */
+			malloc_cprintf(write4, w4opaque, "[%u..%u]\n",
+			    gap_start, j - 1);
+		} else {
+			/* Gap of one size class.
+			 */
+			malloc_cprintf(write4, w4opaque, "[%u]\n", gap_start);
+		}
+	}
+}
+
+static void
+stats_arena_lruns_print(void (*write4)(void *, const char *, const char *,
+    const char *, const char *), void *w4opaque, unsigned i)
+{
+	size_t pagesize, nlruns, j;
+	ssize_t gap_start;
+
+	CTL_GET("arenas.pagesize", &pagesize, size_t);
+
+	malloc_cprintf(write4, w4opaque,
+	    "large: size pages nrequests maxruns curruns\n");
+	CTL_GET("arenas.nlruns", &nlruns, size_t);
+	for (j = 0, gap_start = -1; j < nlruns; j++) {
+		uint64_t nrequests;
+		size_t run_size, highruns, curruns;
+
+		CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
+		    uint64_t);
+		if (nrequests == 0) {
+			if (gap_start == -1)
+				gap_start = j;
+		} else {
+			CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
+			CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
+			    size_t);
+			CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
+			    size_t);
+			if (gap_start != -1) {
+				malloc_cprintf(write4, w4opaque, "[%zu]\n",
+				    j - gap_start);
+				gap_start = -1;
+			}
+			malloc_cprintf(write4, w4opaque,
+			    "%13zu %5zu %9"PRIu64" %9zu %9zu\n",
+			    run_size, run_size / pagesize, nrequests, highruns,
+			    curruns);
+		}
+	}
+	if (gap_start != -1)
+		malloc_cprintf(write4, w4opaque, "[%zu]\n", j - gap_start);
+}
+
+static void
+stats_arena_print(void (*write4)(void *, const char *, const char *,
+    const char *, const char *), void *w4opaque, unsigned i)
+{
+	size_t pactive, pdirty, mapped;
+	uint64_t npurge, nmadvise, purged;
+	size_t small_allocated;
+	uint64_t small_nmalloc, small_ndalloc;
+	size_t medium_allocated;
+	uint64_t medium_nmalloc, medium_ndalloc;
+	size_t large_allocated;
+	uint64_t large_nmalloc, large_ndalloc;
+
+	CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
+	CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
+	CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
+	CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
+	CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
+	malloc_cprintf(write4, w4opaque,
+	    "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
+	    " %"PRIu64" madvise%s, %"PRIu64" purged\n",
+	    pactive, pdirty, npurge, npurge == 1 ? "" : "s",
+	    nmadvise, nmadvise == 1 ?
"" : "s", purged); + + malloc_cprintf(write4, w4opaque, + " allocated nmalloc ndalloc\n"); + CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); + CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); + malloc_cprintf(write4, w4opaque, + "small: %12zu %12"PRIu64" %12"PRIu64"\n", + small_allocated, small_nmalloc, small_ndalloc); + CTL_I_GET("stats.arenas.0.medium.allocated", &medium_allocated, size_t); + CTL_I_GET("stats.arenas.0.medium.nmalloc", &medium_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.medium.ndalloc", &medium_ndalloc, uint64_t); + malloc_cprintf(write4, w4opaque, + "medium: %12zu %12"PRIu64" %12"PRIu64"\n", + medium_allocated, medium_nmalloc, medium_ndalloc); + CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); + CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); + malloc_cprintf(write4, w4opaque, + "large: %12zu %12"PRIu64" %12"PRIu64"\n", + large_allocated, large_nmalloc, large_ndalloc); + malloc_cprintf(write4, w4opaque, + "total: %12zu %12"PRIu64" %12"PRIu64"\n", + small_allocated + medium_allocated + large_allocated, + small_nmalloc + medium_nmalloc + large_nmalloc, + small_ndalloc + medium_ndalloc + large_ndalloc); + CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); + malloc_cprintf(write4, w4opaque, "mapped: %12zu\n", mapped); + + stats_arena_bins_print(write4, w4opaque, i); + stats_arena_lruns_print(write4, w4opaque, i); +} +#endif + void stats_print(void (*write4)(void *, const char *, const char *, const char *, const char *), void *w4opaque, const char *opts) { + uint64_t epoch; + size_t u64sz; char s[UMAX2S_BUFSIZE]; bool general = true; bool merged = true; @@ -117,6 +369,11 @@ stats_print(void (*write4)(void *, const char *, const char *, const char *, bool bins = true; bool large = true; + /* Refresh stats, in case mallctl() was called by the application. */ + epoch = 1; + u64sz = sizeof(uint64_t); + xmallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); + if (write4 == NULL) { /* * The caller did not provide an alternate write4 callback @@ -154,165 +411,175 @@ stats_print(void (*write4)(void *, const char *, const char *, const char *, write4(w4opaque, "___ Begin jemalloc statistics ___\n", "", "", ""); if (general) { - write4(w4opaque, "Assertions ", -#ifdef NDEBUG - "disabled", -#else - "enabled", -#endif + int err; + bool bv; + unsigned uv; + ssize_t ssv; + size_t sv, bsz, usz, ssz, sssz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + + CTL_GET("config.debug", &bv, bool); + write4(w4opaque, "Assertions ", bv ? "enabled" : "disabled", "\n", ""); - write4(w4opaque, "Boolean JEMALLOC_OPTIONS: ", - opt_abort ? "A" : "a", "", ""); -#ifdef JEMALLOC_FILL - write4(w4opaque, opt_junk ? "J" : "j", "", "", ""); -#endif -#ifdef JEMALLOC_SWAP - write4(w4opaque, opt_overcommit ? "O" : "o", "", "", ""); -#endif + + write4(w4opaque, "Boolean JEMALLOC_OPTIONS: ", "", "", ""); + if ((err = mallctl("opt.abort", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "A" : "a", "", "", ""); + if ((err = mallctl("opt.junk", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "J" : "j", "", "", ""); + if ((err = mallctl("opt.overcommit", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "O" : "o", "", "", ""); write4(w4opaque, "P", "", "", ""); -#ifdef JEMALLOC_TCACHE - write4(w4opaque, opt_tcache_sort ? 
"S" : "s", "", "", ""); -#endif -#ifdef JEMALLOC_TRACE - write4(w4opaque, opt_trace ? "T" : "t", "", "", ""); -#endif -#ifdef JEMALLOC_SYSV - write4(w4opaque, opt_sysv ? "V" : "v", "", "", ""); -#endif -#ifdef JEMALLOC_XMALLOC - write4(w4opaque, opt_xmalloc ? "X" : "x", "", "", ""); -#endif -#ifdef JEMALLOC_FILL - write4(w4opaque, opt_zero ? "Z" : "z", "", "", ""); -#endif + if ((err = mallctl("opt.tcache_sort", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "S" : "s", "", "", ""); + if ((err = mallctl("opt.trace", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "T" : "t", "", "", ""); + if ((err = mallctl("opt.sysv", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "V" : "v", "", "", ""); + if ((err = mallctl("opt.xmalloc", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "X" : "x", "", "", ""); + if ((err = mallctl("opt.zero", &bv, &bsz, NULL, 0)) == 0) + write4(w4opaque, bv ? "Z" : "z", "", "", ""); write4(w4opaque, "\n", "", "", ""); write4(w4opaque, "CPUs: ", umax2s(ncpus, 10, s), "\n", ""); - write4(w4opaque, "Max arenas: ", umax2s(narenas, 10, s), "\n", - ""); + + CTL_GET("arenas.narenas", &uv, unsigned); + write4(w4opaque, "Max arenas: ", umax2s(uv, 10, s), "\n", ""); + write4(w4opaque, "Pointer size: ", umax2s(sizeof(void *), 10, s), "\n", ""); - write4(w4opaque, "Quantum size: ", umax2s(QUANTUM, 10, s), "\n", + + CTL_GET("arenas.quantum", &sv, size_t); + write4(w4opaque, "Quantum size: ", umax2s(sv, 10, s), "\n", ""); + + CTL_GET("arenas.cacheline", &sv, size_t); + write4(w4opaque, "Cacheline size (assumed): ", umax2s(sv, 10, + s), "\n", ""); + + CTL_GET("arenas.subpage", &sv, size_t); + write4(w4opaque, "Subpage spacing: ", umax2s(sv, 10, s), "\n", ""); - write4(w4opaque, "Cacheline size (assumed): ", umax2s(CACHELINE, - 10, s), - "\n", ""); - write4(w4opaque, "Subpage spacing: ", umax2s(SUBPAGE, 10, s), - "\n", ""); - write4(w4opaque, "Medium spacing: ", umax2s((1U << lg_mspace), - 10, s), "\n", ""); -#ifdef JEMALLOC_TINY - write4(w4opaque, "Tiny 2^n-spaced sizes: [", umax2s((1U << - LG_TINY_MIN), 10, s), "..", ""); - write4(w4opaque, umax2s((qspace_min >> 1), 10, s), "]\n", "", + + CTL_GET("arenas.medium", &sv, size_t); + write4(w4opaque, "Medium spacing: ", umax2s(sv, 10, s), "\n", ""); -#endif - write4(w4opaque, "Quantum-spaced sizes: [", umax2s(qspace_min, - 10, s), "..", ""); - write4(w4opaque, umax2s(qspace_max, 10, s), "]\n", "", ""); - write4(w4opaque, "Cacheline-spaced sizes: [", umax2s(cspace_min, - 10, s), "..", ""); - write4(w4opaque, umax2s(cspace_max, 10, s), "]\n", "", ""); - write4(w4opaque, "Subpage-spaced sizes: [", umax2s(sspace_min, - 10, s), "..", ""); - write4(w4opaque, umax2s(sspace_max, 10, s), "]\n", "", ""); - write4(w4opaque, "Medium sizes: [", umax2s(medium_min, 10, s), + + if ((err = mallctl("arenas.tspace_min", &sv, &ssz, NULL, 0)) == + 0) { + write4(w4opaque, "Tiny 2^n-spaced sizes: [", umax2s(sv, + 10, s), "..", ""); + + CTL_GET("arenas.tspace_max", &sv, size_t); + write4(w4opaque, umax2s(sv, 10, s), "]\n", "", ""); + } + + CTL_GET("arenas.qspace_min", &sv, size_t); + write4(w4opaque, "Quantum-spaced sizes: [", umax2s(sv, 10, s), "..", ""); - write4(w4opaque, umax2s(medium_max, 10, s), "]\n", "", ""); - if (opt_lg_dirty_mult >= 0) { + CTL_GET("arenas.qspace_max", &sv, size_t); + write4(w4opaque, umax2s(sv, 10, s), "]\n", "", ""); + + CTL_GET("arenas.cspace_min", &sv, size_t); + write4(w4opaque, "Cacheline-spaced sizes: [", umax2s(sv, 10, s), + "..", ""); + CTL_GET("arenas.cspace_max", &sv, size_t); + write4(w4opaque, umax2s(sv, 10, s), 
"]\n", "", ""); + + CTL_GET("arenas.sspace_min", &sv, size_t); + write4(w4opaque, "Subpage-spaced sizes: [", umax2s(sv, 10, s), + "..", ""); + CTL_GET("arenas.sspace_max", &sv, size_t); + write4(w4opaque, umax2s(sv, 10, s), "]\n", "", ""); + + CTL_GET("arenas.medium_min", &sv, size_t); + write4(w4opaque, "Medium sizes: [", umax2s(sv, 10, s), "..", + ""); + CTL_GET("arenas.medium_max", &sv, size_t); + write4(w4opaque, umax2s(sv, 10, s), "]\n", "", ""); + + CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); + if (ssv >= 0) { write4(w4opaque, "Min active:dirty page ratio per arena: ", - umax2s((1U << opt_lg_dirty_mult), 10, s), ":1\n", - ""); + umax2s((1U << ssv), 10, s), ":1\n", ""); } else { write4(w4opaque, "Min active:dirty page ratio per arena: N/A\n", "", "", ""); } #ifdef JEMALLOC_TCACHE - write4(w4opaque, "Thread cache slots per size class: ", - tcache_nslots ? umax2s(tcache_nslots, 10, s) : "N/A", "\n", - ""); - write4(w4opaque, "Thread cache GC sweep interval: ", - (tcache_nslots && tcache_gc_incr > 0) ? - umax2s((1U << opt_lg_tcache_gc_sweep), 10, s) : "N/A", - "", ""); - write4(w4opaque, " (increment interval: ", - (tcache_nslots && tcache_gc_incr > 0) ? - umax2s(tcache_gc_incr, 10, s) : "N/A", - ")\n", ""); + if ((err = mallctl("opt.lg_tcache_nslots", &sv, &ssz, NULL, 0)) + == 0) { + size_t tcache_nslots, tcache_gc_sweep; + + tcache_nslots = (1U << sv); + write4(w4opaque, "Thread cache slots per size class: ", + tcache_nslots ? umax2s(tcache_nslots, 10, s) : + "N/A", "\n", ""); + + CTL_GET("opt.lg_tcache_gc_sweep", &ssv, ssize_t); + tcache_gc_sweep = (1U << ssv); + write4(w4opaque, "Thread cache GC sweep interval: ", + tcache_nslots && ssv >= 0 ? umax2s(tcache_gc_sweep, + 10, s) : "N/A", "\n", ""); + } #endif - write4(w4opaque, "Chunk size: ", umax2s(chunksize, 10, s), "", - ""); - write4(w4opaque, " (2^", umax2s(opt_lg_chunk, 10, s), ")\n", - ""); + CTL_GET("arenas.chunksize", &sv, size_t); + write4(w4opaque, "Chunk size: ", umax2s(sv, 10, s), "", ""); + CTL_GET("opt.lg_chunk", &sv, size_t); + write4(w4opaque, " (2^", umax2s(sv, 10, s), ")\n", ""); } #ifdef JEMALLOC_STATS { + int err; + size_t ssz, u64sz; size_t allocated, mapped; - unsigned i; - arena_t *arena; + size_t chunks_current, chunks_high, swap_avail; + uint64_t chunks_total; + size_t huge_allocated; + uint64_t huge_nmalloc, huge_ndalloc; - /* Calculate and print allocated/mapped stats. */ - - /* arenas. */ - for (i = 0, allocated = 0; i < narenas; i++) { - if (arenas[i] != NULL) { - malloc_mutex_lock(&arenas[i]->lock); - allocated += arenas[i]->stats.allocated_small; - allocated += arenas[i]->stats.allocated_large; - malloc_mutex_unlock(&arenas[i]->lock); - } - } - - /* huge/base. */ - malloc_mutex_lock(&huge_mtx); - allocated += huge_allocated; - mapped = stats_chunks.curchunks * chunksize; - malloc_mutex_unlock(&huge_mtx); + ssz = sizeof(size_t); + u64sz = sizeof(uint64_t); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.mapped", &mapped, size_t); malloc_cprintf(write4, w4opaque, "Allocated: %zu, mapped: %zu\n", allocated, mapped); /* Print chunk stats. 
 		 */
-		{
-			chunk_stats_t chunks_stats;
-#ifdef JEMALLOC_SWAP
-			size_t swap_avail_chunks;
-#endif
-
-			malloc_mutex_lock(&huge_mtx);
-			chunks_stats = stats_chunks;
-			malloc_mutex_unlock(&huge_mtx);
-
-#ifdef JEMALLOC_SWAP
-			malloc_mutex_lock(&swap_mtx);
-			swap_avail_chunks = swap_avail >> opt_lg_chunk;
-			malloc_mutex_unlock(&swap_mtx);
-#endif
+		CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
+		CTL_GET("stats.chunks.high", &chunks_high, size_t);
+		CTL_GET("stats.chunks.current", &chunks_current, size_t);
+		if ((err = mallctl("swap.avail", &swap_avail, &ssz,
+		    NULL, 0)) == 0) {
+			size_t lg_chunk;
 
 			malloc_cprintf(write4, w4opaque, "chunks: nchunks "
-			    "highchunks curchunks"
-#ifdef JEMALLOC_SWAP
-			    " swap_avail"
-#endif
-			    "\n");
+			    "highchunks curchunks swap_avail\n");
+			CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
 			malloc_cprintf(write4, w4opaque,
-			    " %13"PRIu64"%13zu%13zu"
-#ifdef JEMALLOC_SWAP
-			    "%13zu"
-#endif
-			    "\n",
-			    chunks_stats.nchunks, chunks_stats.highchunks,
-			    chunks_stats.curchunks
-#ifdef JEMALLOC_SWAP
-			    , swap_avail_chunks
-#endif
-			    );
+			    " %13"PRIu64"%13zu%13zu%13zu\n",
+			    chunks_total, chunks_high, chunks_current,
+			    swap_avail >> lg_chunk);
+		} else {
+			malloc_cprintf(write4, w4opaque, "chunks: nchunks "
+			    "highchunks curchunks\n");
+			malloc_cprintf(write4, w4opaque,
+			    " %13"PRIu64"%13zu%13zu\n",
+			    chunks_total, chunks_high, chunks_current);
 		}
 
-		/* Print chunk stats. */
+		/* Print huge stats. */
+		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
+		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
+		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
 		malloc_cprintf(write4, w4opaque,
 		    "huge: nmalloc ndalloc allocated\n");
 		malloc_cprintf(write4, w4opaque,
@@ -320,56 +587,58 @@ stats_print(void (*write4)(void *, const char *, const char *, const char *,
 		    huge_nmalloc, huge_ndalloc, huge_allocated);
 
 		if (merged) {
-			unsigned nmerged;
-			size_t nactive, ndirty;
-			arena_stats_t astats;
-			malloc_bin_stats_t bstats[nbins];
-			malloc_large_stats_t lstats[((chunksize - PAGE_SIZE) >>
-			    PAGE_SHIFT)];
+			unsigned narenas;
+			size_t usz;
 
-			nactive = 0;
-			ndirty = 0;
-			memset(&astats, 0, sizeof(astats));
-			memset(bstats, 0, sizeof(bstats));
-			memset(lstats, 0, sizeof(lstats));
+			usz = sizeof(unsigned);
+			CTL_GET("arenas.narenas", &narenas, unsigned);
+			{
+				bool initialized[narenas];
+				size_t isz;
+				unsigned i, ninitialized;
 
-			/* Create merged arena stats. */
-			for (i = nmerged = 0; i < narenas; i++) {
-				arena = arenas[i];
-				if (arena != NULL) {
-					malloc_mutex_lock(&arena->lock);
-					arena_stats_merge(arena, &nactive,
-					    &ndirty, &astats, bstats, lstats);
-					malloc_mutex_unlock(&arena->lock);
-					nmerged++;
+				isz = sizeof(initialized);
+				xmallctl("arenas.initialized", initialized,
+				    &isz, NULL, 0);
+				for (i = ninitialized = 0; i < narenas; i++) {
+					if (initialized[i])
+						ninitialized++;
 				}
-			}
 
-			if (nmerged > 1) {
-				/* Print merged arena stats. */
-				malloc_cprintf(write4, w4opaque,
-				    "\nMerge arenas stats:\n");
-				/*
-				 * arenas[0] is used only for invariant bin
-				 * settings.
-				 */
-				arena_stats_mprint(arenas[0], nactive, ndirty,
-				    &astats, bstats, lstats, bins, large,
-				    write4, w4opaque);
+				if (ninitialized > 1) {
+					/* Print merged arena stats. */
+					malloc_cprintf(write4, w4opaque,
+					    "\nMerge arenas stats:\n");
+					stats_arena_print(write4, w4opaque,
+					    narenas);
+				}
+			}
 			}
 		}
 
 		if (unmerged) {
+			unsigned narenas;
+			size_t usz;
+
 			/* Print stats for each arena.
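 			 * arenas.initialized is read first so that only
 			 * arenas which have actually been created are visited.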
*/ - for (i = 0; i < narenas; i++) { - arena = arenas[i]; - if (arena != NULL) { - malloc_cprintf(write4, w4opaque, - "\narenas[%u]:\n", i); - malloc_mutex_lock(&arena->lock); - arena_stats_print(arena, bins, large, - write4, w4opaque); - malloc_mutex_unlock(&arena->lock); + + usz = sizeof(unsigned); + CTL_GET("arenas.narenas", &narenas, unsigned); + { + bool initialized[narenas]; + size_t isz; + unsigned i; + + isz = sizeof(initialized); + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + + for (i = 0; i < narenas; i++) { + if (initialized[i]) { + malloc_cprintf(write4, w4opaque, + "\narenas[%u]:\n", i); + stats_arena_print(write4, + w4opaque, i); + } } } } diff --git a/jemalloc/src/jemalloc_trace.c b/jemalloc/src/jemalloc_trace.c index 657a0968..89d30a73 100644 --- a/jemalloc/src/jemalloc_trace.c +++ b/jemalloc/src/jemalloc_trace.c @@ -254,11 +254,13 @@ trace_thread_cleanup(void *arg) trace_thread_exit(); } -void +bool trace_boot(void) { - malloc_mutex_init(&trace_mtx); + if (malloc_mutex_init(&trace_mtx)) + return (true); + /* Flush trace buffers at exit. */ atexit(malloc_trace_flush_all); /* Receive thread exit notifications. */ @@ -267,6 +269,8 @@ trace_boot(void) ": Error in pthread_key_create()\n", "", ""); abort(); } + + return (false); } /******************************************************************************/ #endif /* JEMALLOC_TRACE */
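The sketch below illustrates, for reviewers, how an application might drive the interface added by this patch; it is not part of the patch itself. It assumes an unprefixed build (@jemalloc_prefix@ empty, so the entry points are named mallctl(), mallctlnametomib(), and mallctlbymib()), a header installed as jemalloc.h (empty @install_suffix@), and JEMALLOC_STATS compiled in. Writing any non-zero value to "epoch" refreshes the snapshot from which stats.* reads are served; "stats.allocated" is then read by name, and the per-arena "pactive" values are read through a pre-translated MIB, mirroring what the new CTL_I_GET() macro in jemalloc_stats.c does.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "jemalloc.h"	/* Assumed install name. */

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	size_t allocated, mib[4], miblen = 4;
	unsigned narenas, i;
	void *p = malloc(4096);	/* Generate some allocator activity. */

	/* Writing to "epoch" refreshes the cached statistics snapshot. */
	if (mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch)) != 0)
		return (1);

	/* Simple read by name. */
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu\n", allocated);

	/* Repeated reads: translate the name once, then reuse the MIB. */
	sz = sizeof(narenas);
	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
	if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) == 0) {
		for (i = 0; i < narenas; i++) {
			size_t pactive;

			mib[2] = i;	/* Select arena i. */
			sz = sizeof(pactive);
			if (mallctlbymib(mib, miblen, &pactive, &sz, NULL,
			    0) == 0)
				printf("arenas[%u] pactive: %zu\n", i, pactive);
		}
	}

	free(p);
	return (0);
}

Swap-backed chunks are configured through the same mechanism: write swap.prezeroed first if the files are known to be zeroed, then write the file descriptor array to swap.fds, which calls chunk_swap_enable() internally and replaces the removed malloc_swap_enable() entry point.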