Merge branch 'dev'

Jason Evans 2012-10-16 10:40:57 -07:00
commit 3b1f3aca54
35 changed files with 1222 additions and 339 deletions

.gitignore (vendored): 2 changes
View File

@@ -18,7 +18,7 @@
 /src/*.[od]
 /test/*.[od]
 /test/*.out
-/test/[a-z]*
+/test/[a-zA-Z_]*
 !test/*.c
 !test/*.exp
 /VERSION

View File

@@ -6,6 +6,32 @@ found in the git revision history:
     http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
     git://canonware.com/jemalloc.git

+* 3.1.0 (October 16, 2012)
+
+  New features:
+  - Auto-detect whether running inside Valgrind, thus removing the need to
+    manually specify MALLOC_CONF=valgrind:true.
+  - Add the "arenas.extend" mallctl, which allows applications to create
+    manually managed arenas.
+  - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
+  - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
+    which provide control over dss/mmap precedence.
+  - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
+  - Define LG_QUANTUM for hppa.
+
+  Incompatible changes:
+  - Disable tcache by default if running inside Valgrind, in order to avoid
+    making unallocated objects appear reachable to Valgrind.
+  - Drop const from malloc_usable_size() argument on Linux.
+
+  Bug fixes:
+  - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+  - Remove const from __*_hook variable declarations, so that glibc can modify
+    them during process forking.
+  - Fix mlockall(2)/madvise(2) interaction.
+  - Fix fork(2)-related deadlocks.
+  - Fix error return value for "thread.tcache.enabled" mallctl.
+
 * 3.0.0 (May 11, 2012)
   Although this version adds some major new features, the primary focus is on
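The two arena-related additions above are meant to be used together: an application asks jemalloc for a manually managed arena via the "arenas.extend" mallctl, then directs allocations at it with ALLOCM_ARENA(). A minimal sketch of that flow, assuming a default (unprefixed) build configured with --enable-experimental so that mallctl()/allocm()/dallocm() are exported under these names:

#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* Append a new, manually managed arena and obtain its index. */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
		return (1);

	/*
	 * Allocate 4 KiB explicitly from that arena, bypassing the automatic
	 * thread<->arena multiplexing.
	 */
	if (allocm(&p, NULL, 4096, ALLOCM_ARENA(arena_ind)) != ALLOCM_SUCCESS)
		return (1);

	dallocm(p, 0);
	return (0);
}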

View File

@@ -101,9 +101,9 @@ DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.html)
 DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(srcroot)%.3)
 DOCS := $(DOCS_HTML) $(DOCS_MAN3)
 CTESTS := $(srcroot)test/aligned_alloc.c $(srcroot)test/allocated.c \
-	$(srcroot)test/bitmap.c $(srcroot)test/mremap.c \
-	$(srcroot)test/posix_memalign.c $(srcroot)test/thread_arena.c \
-	$(srcroot)test/thread_tcache_enabled.c
+	$(srcroot)test/ALLOCM_ARENA.c $(srcroot)test/bitmap.c \
+	$(srcroot)test/mremap.c $(srcroot)test/posix_memalign.c \
+	$(srcroot)test/thread_arena.c $(srcroot)test/thread_tcache_enabled.c
 ifeq ($(enable_experimental), 1)
 CTESTS += $(srcroot)test/allocm.c $(srcroot)test/rallocm.c
 endif

View File

@@ -237,6 +237,7 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
 dnl definitions need to be seen before any headers are included, which is a pain
 dnl to make happen otherwise.
 default_munmap="1"
+JEMALLOC_USABLE_SIZE_CONST="const"
 case "${host}" in
   *-*-darwin*)
	CFLAGS="$CFLAGS"
@@ -262,6 +263,7 @@ case "${host}" in
	abi="elf"
	AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
	AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+	JEMALLOC_USABLE_SIZE_CONST=""
	default_munmap="0"
	;;
   *-*-netbsd*)
@@ -323,6 +325,7 @@ case "${host}" in
	abi="elf"
	;;
 esac
+AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
 AC_SUBST([abi])
 AC_SUBST([RPATH])
 AC_SUBST([LD_PRELOAD_VAR])
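The net effect of this JEMALLOC_USABLE_SIZE_CONST plumbing on the public prototype can be sketched as follows; the __linux__ test below merely stands in for configure's host check and is not how the build system actually decides:

/*
 * Illustrative only: configure substitutes the macro at build time; this
 * compile-time test just mimics the outcome.
 */
#ifdef __linux__
#  define JEMALLOC_USABLE_SIZE_CONST		/* glibc's prototype has no const */
#else
#  define JEMALLOC_USABLE_SIZE_CONST const
#endif

size_t	malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr);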

View File

@@ -368,6 +368,15 @@ for (i = 0; i < nbins; i++) {
         object.  This constraint can apply to both growth and
         shrinkage.</para></listitem>
       </varlistentry>
+      <varlistentry>
+        <term><constant>ALLOCM_ARENA(<parameter>a</parameter>)
+        </constant></term>
+
+        <listitem><para>Use the arena specified by the index
+        <parameter>a</parameter>.  This macro does not validate that
+        <parameter>a</parameter> specifies an arena in the valid
+        range.</para></listitem>
+      </varlistentry>
     </variablelist>
   </para>
@@ -785,15 +794,29 @@ for (i = 0; i < nbins; i++) {
         chunk size is 4 MiB (2^22).</para></listitem>
       </varlistentry>

+      <varlistentry id="opt.dss">
+        <term>
+          <mallctl>opt.dss</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+        related to <citerefentry><refentrytitle>mmap</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> allocation.  The following
+        settings are supported: &ldquo;disabled&rdquo;, &ldquo;primary&rdquo;,
+        and &ldquo;secondary&rdquo; (default).</para></listitem>
+      </varlistentry>
+
       <varlistentry id="opt.narenas">
         <term>
           <mallctl>opt.narenas</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
         </term>
-        <listitem><para>Maximum number of arenas to use.  The default maximum
-        number of arenas is four times the number of CPUs, or one if there is a
-        single CPU.</para></listitem>
+        <listitem><para>Maximum number of arenas to use for automatic
+        multiplexing of threads and arenas.  The default is four times the
+        number of CPUs, or one if there is a single CPU.</para></listitem>
       </varlistentry>

       <varlistentry id="opt.lg_dirty_mult">
@@ -846,7 +869,9 @@ for (i = 0; i < nbins; i++) {
         <literal>0x5a</literal>.  This is intended for debugging and will
         impact performance negatively.  This option is disabled by default
         unless <option>--enable-debug</option> is specified during
-        configuration, in which case it is enabled by default.</para></listitem>
+        configuration, in which case it is enabled by default unless running
+        inside <ulink
+        url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
       </varlistentry>

       <varlistentry id="opt.quarantine">
@@ -865,8 +890,9 @@ for (i = 0; i < nbins; i++) {
         enabled.  This feature is of particular use in combination with <ulink
         url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts
         to access quarantined objects.  This is intended for debugging and will
-        impact performance negatively.  The default quarantine size is
-        0.</para></listitem>
+        impact performance negatively.  The default quarantine size is 0 unless
+        running inside Valgrind, in which case the default is 16
+        MiB.</para></listitem>
       </varlistentry>

       <varlistentry id="opt.redzone">
@@ -885,7 +911,7 @@ for (i = 0; i < nbins; i++) {
         which needs redzones in order to do effective buffer overflow/underflow
         detection.  This option is intended for debugging and will impact
         performance negatively.  This option is disabled by
-        default.</para></listitem>
+        default unless running inside Valgrind.</para></listitem>
       </varlistentry>

       <varlistentry id="opt.zero">
@@ -926,15 +952,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         [<option>--enable-valgrind</option>]
         </term>
         <listitem><para><ulink url="http://valgrind.org/">Valgrind</ulink>
-        support enabled/disabled.  If enabled, several other options are
-        automatically modified during options processing to work well with
-        Valgrind: <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
-        and <link linkend="opt.zero"><mallctl>opt.zero</mallctl></link> are set
-        to false, <link
-        linkend="opt.quarantine"><mallctl>opt.quarantine</mallctl></link> is
-        set to 16 MiB, and <link
-        linkend="opt.redzone"><mallctl>opt.redzone</mallctl></link> is set to
-        true.  This option is disabled by default.</para></listitem>
+        support enabled/disabled.  This option is vestigal because jemalloc
+        auto-detects whether it is running inside Valgrind.  This option is
+        disabled by default, unless running inside Valgrind.</para></listitem>
       </varlistentry>

       <varlistentry id="opt.xmalloc">
@@ -972,7 +992,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <link
        linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
         option for related tuning information.  This option is enabled by
-        default.</para></listitem>
+        default unless running inside <ulink
+        url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
       </varlistentry>

       <varlistentry id="opt.lg_tcache_max">
@@ -1151,11 +1172,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <literal>rw</literal>
         </term>
         <listitem><para>Get or set the arena associated with the calling
-        thread.  The arena index must be less than the maximum number of arenas
-        (see the <link
-        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>
-        mallctl).  If the specified arena was not initialized beforehand (see
-        the <link
+        thread.  If the specified arena was not initialized beforehand (see the
+        <link
         linkend="arenas.initialized"><mallctl>arenas.initialized</mallctl></link>
         mallctl), it will be automatically initialized as a side effect of
         calling this interface.</para></listitem>
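A sketch of how an application might rebind the calling thread through this mallctl, assuming arena_ind was obtained earlier (for example from "arenas.extend"); error handling omitted:

unsigned old_ind;
size_t sz = sizeof(old_ind);

/* Return the previous binding through oldp while installing the new one. */
mallctl("thread.arena", &old_ind, &sz, &arena_ind, sizeof(arena_ind));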
@@ -1247,13 +1265,40 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         the developer may find manual flushing useful.</para></listitem>
       </varlistentry>

+      <varlistentry id="arena.i.purge">
+        <term>
+          <mallctl>arena.&lt;i&gt;.purge</mallctl>
+          (<type>unsigned</type>)
+          <literal>--</literal>
+        </term>
+        <listitem><para>Purge unused dirty pages for arena &lt;i&gt;, or for
+        all arenas if &lt;i&gt; equals <link
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arena.i.dss">
+        <term>
+          <mallctl>arena.&lt;i&gt;.dss</mallctl>
+          (<type>const char *</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Set the precedence of dss allocation as related to mmap
+        allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
+        <link
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.  See
+        <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+        settings.
+        </para></listitem>
+      </varlistentry>
+
       <varlistentry id="arenas.narenas">
         <term>
           <mallctl>arenas.narenas</mallctl>
           (<type>unsigned</type>)
           <literal>r-</literal>
         </term>
-        <listitem><para>Maximum number of arenas.</para></listitem>
+        <listitem><para>Current limit on number of arenas.</para></listitem>
       </varlistentry>

       <varlistentry id="arenas.initialized">
@@ -1372,6 +1417,16 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         for all arenas if none is specified.</para></listitem>
       </varlistentry>

+      <varlistentry>
+        <term>
+          <mallctl>arenas.extend</mallctl>
+          (<type>unsigned</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>Extend the array of arenas by appending a new arena,
+        and returning the new arena index.</para></listitem>
+      </varlistentry>
+
       <varlistentry id="prof.active">
         <term>
           <mallctl>prof.active</mallctl>
@@ -1540,6 +1595,20 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>

+      <varlistentry>
+        <term>
+          <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
+          (<type>const char *</type>)
+          <literal>r-</literal>
+        </term>
+        <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+        related to <citerefentry><refentrytitle>mmap</refentrytitle>
+        <manvolnum>2</manvolnum></citerefentry> allocation.  See <link
+        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
+        </para></listitem>
+      </varlistentry>
+
       <varlistentry>
         <term>
           <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
@@ -1865,9 +1934,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
   it detects, because the performance impact for storing such information
   would be prohibitive.  However, jemalloc does integrate with the most
   excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the
-  <option>--enable-valgrind</option> configuration option is enabled and the
-  <link linkend="opt.valgrind"><mallctl>opt.valgrind</mallctl></link> option
-  is enabled.</para>
+  <option>--enable-valgrind</option> configuration option is enabled.</para>
 </refsect1>
 <refsect1 id="diagnostic_messages">
   <title>DIAGNOSTIC MESSAGES</title>

View File

@@ -331,6 +331,8 @@ struct arena_s {
	uint64_t		prof_accumbytes;

+	dss_prec_t		dss_prec;
+
	/* List of dirty-page-containing chunks this arena manages. */
	ql_head(arena_chunk_t)	chunks_dirty;

@@ -422,13 +424,16 @@ void	arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 void	arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr);
 void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void	arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
-    arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats);
 void	*arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
-void	*arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache);
+void	*arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+    bool try_tcache_dalloc);
+dss_prec_t	arena_dss_prec_get(arena_t *arena);
+void	arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+void	arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+    malloc_large_stats_t *lstats);
 bool	arena_new(arena_t *arena, unsigned ind);
 void	arena_boot(void);
 void	arena_prefork(arena_t *arena);

View File

@@ -28,6 +28,7 @@
 #ifdef JEMALLOC_H_EXTERNS

 extern size_t		opt_lg_chunk;
+extern const char	*opt_dss;

 /* Protects stats_chunks; currently not used for any other purpose. */
 extern malloc_mutex_t	chunks_mtx;
@@ -42,9 +43,14 @@ extern size_t		chunk_npages;
 extern size_t		map_bias;	/* Number of arena chunk header pages. */
 extern size_t		arena_maxclass;	/* Max size class for arenas. */

-void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
+void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+    dss_prec_t dss_prec);
+void	chunk_unmap(void *chunk, size_t size);
 void	chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool	chunk_boot(void);
+void	chunk_prefork(void);
+void	chunk_postfork_parent(void);
+void	chunk_postfork_child(void);

 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/

View File

@@ -1,14 +1,28 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_TYPES

+typedef enum {
+	dss_prec_disabled  = 0,
+	dss_prec_primary   = 1,
+	dss_prec_secondary = 2,
+	dss_prec_limit     = 3
+} dss_prec_t;
+#define	DSS_PREC_DEFAULT	dss_prec_secondary
+#define	DSS_DEFAULT		"secondary"
+
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS

+extern const char *dss_prec_names[];
+
 #endif /* JEMALLOC_H_STRUCTS */
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS

+dss_prec_t	chunk_dss_prec_get(void);
+bool	chunk_dss_prec_set(dss_prec_t dss_prec);
 void	*chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
 bool	chunk_in_dss(void *chunk);
 bool	chunk_dss_boot(void);
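Given that dss_prec_names[] is evidently indexed by these enumerators (the new arena_stats_merge() later in this commit reports dss_prec_names[arena->dss_prec]), option parsing for a "dss:<setting>" string can be sketched roughly as below. The helper is hypothetical, written in the tree's internal style, and is not the code this commit adds:

#include <stdbool.h>
#include <string.h>

/*
 * Hypothetical: map a precedence name such as "secondary" to its dss_prec_t
 * value; return true if the name is not recognized.
 */
static bool
dss_prec_from_str(const char *str, dss_prec_t *prec)
{
	unsigned i;

	for (i = 0; i < (unsigned)dss_prec_limit; i++) {
		if (strcmp(str, dss_prec_names[i]) == 0) {
			*prec = (dss_prec_t)i;
			return (false);
		}
	}
	return (true);
}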

View File

@@ -9,7 +9,7 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS

-void	pages_purge(void *addr, size_t length);
+bool	pages_purge(void *addr, size_t length);

 void	*chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
 bool	chunk_dealloc_mmap(void *chunk, size_t size);

View File

@@ -33,6 +33,7 @@ struct ctl_indexed_node_s {
 struct ctl_arena_stats_s {
	bool			initialized;
	unsigned		nthreads;
+	const char		*dss;
	size_t			pactive;
	size_t			pdirty;
	arena_stats_t		astats;
@@ -61,6 +62,7 @@ struct ctl_stats_s {
		uint64_t	nmalloc;	/* huge_nmalloc */
		uint64_t	ndalloc;	/* huge_ndalloc */
	} huge;
+	unsigned		narenas;
	ctl_arena_stats_t	*arenas;	/* (narenas + 1) elements. */
 };

@@ -75,6 +77,9 @@ int	ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
 int	ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     void *newp, size_t newlen);
 bool	ctl_boot(void);
+void	ctl_prefork(void);
+void	ctl_postfork_parent(void);
+void	ctl_postfork_child(void);

 #define	xmallctl(name, oldp, oldlenp, newp, newlen) do {		\
	if (je_mallctl(name, oldp, oldlenp, newp, newlen)		\
View File

@@ -23,6 +23,9 @@ struct extent_node_s {
	/* Total region size. */
	size_t			size;
+
+	/* True if zero-filled; used by chunk recycling code. */
+	bool			zeroed;
 };
 typedef rb_tree(extent_node_t) extent_tree_t;

View File

@@ -22,7 +22,7 @@ void	*huge_palloc(size_t size, size_t alignment, bool zero);
 void	*huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra);
 void	*huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero);
+    size_t alignment, bool zero, bool try_tcache_dalloc);
 void	huge_dalloc(void *ptr, bool unmap);
 size_t	huge_salloc(const void *ptr);
 prof_ctx_t	*huge_prof_ctx_get(const void *ptr);

View File

@ -270,6 +270,9 @@ static const bool config_ivsalloc =
# ifdef __arm__ # ifdef __arm__
# define LG_QUANTUM 3 # define LG_QUANTUM 3
# endif # endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
# ifdef __mips__ # ifdef __mips__
# define LG_QUANTUM 3 # define LG_QUANTUM 3
# endif # endif
@ -424,6 +427,7 @@ static const bool config_ivsalloc =
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0) } while (0)
#else #else
#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) #define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) #define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) #define VALGRIND_FREELIKE_BLOCK(addr, rzB)
@ -510,13 +514,19 @@ extern size_t opt_narenas;
/* Number of CPUs. */ /* Number of CPUs. */
extern unsigned ncpus; extern unsigned ncpus;
extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */ /* Protects arenas initialization (arenas, arenas_total). */
extern malloc_mutex_t arenas_lock;
/* /*
* Arenas that are used to service external requests. Not all elements of the * Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed. * arenas array are necessarily used; arenas are created lazily as needed.
*
* arenas[0..narenas_auto) are used for automatic multiplexing of threads and
* arenas. arenas[narenas_auto..narenas_total) are only used if the application
* takes some action to create them and allocate from them.
*/ */
extern arena_t **arenas; extern arena_t **arenas;
extern unsigned narenas; extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */
arena_t *arenas_extend(unsigned ind); arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg); void arenas_cleanup(void *arg);
@ -571,6 +581,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size); size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment); size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena); arena_t *choose_arena(arena_t *arena);
#endif #endif
@ -675,6 +686,18 @@ sa2u(size_t size, size_t alignment)
} }
} }
JEMALLOC_INLINE unsigned
narenas_total_get(void)
{
unsigned narenas;
malloc_mutex_lock(&arenas_lock);
narenas = narenas_total;
malloc_mutex_unlock(&arenas_lock);
return (narenas);
}
/* Choose an arena based on a per-thread value. */ /* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t * JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena) choose_arena(arena_t *arena)
@ -710,15 +733,24 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h" #include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE #ifndef JEMALLOC_ENABLE_INLINE
void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size); void *imalloc(size_t size);
void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size); void *icalloc(size_t size);
void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero); void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote); size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote); size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize); size_t u2rz(size_t usize);
size_t p2rz(const void *ptr); size_t p2rz(const void *ptr);
void idallocx(void *ptr, bool try_tcache);
void idalloc(void *ptr); void idalloc(void *ptr);
void iqallocx(void *ptr, bool try_tcache);
void iqalloc(void *ptr); void iqalloc(void *ptr);
void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move); bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
@ -726,29 +758,44 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
imalloc(size_t size) imallocx(size_t size, bool try_tcache, arena_t *arena)
{ {
assert(size != 0); assert(size != 0);
if (size <= arena_maxclass) if (size <= arena_maxclass)
return (arena_malloc(NULL, size, false, true)); return (arena_malloc(arena, size, false, try_tcache));
else else
return (huge_malloc(size, false)); return (huge_malloc(size, false));
} }
JEMALLOC_INLINE void *
imalloc(size_t size)
{
return (imallocx(size, true, NULL));
}
JEMALLOC_INLINE void *
icallocx(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true));
}
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
icalloc(size_t size) icalloc(size_t size)
{ {
if (size <= arena_maxclass) return (icallocx(size, true, NULL));
return (arena_malloc(NULL, size, true, true));
else
return (huge_malloc(size, true));
} }
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero) ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{ {
void *ret; void *ret;
@ -756,11 +803,11 @@ ipalloc(size_t usize, size_t alignment, bool zero)
assert(usize == sa2u(usize, alignment)); assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE) if (usize <= arena_maxclass && alignment <= PAGE)
ret = arena_malloc(NULL, usize, zero, true); ret = arena_malloc(arena, usize, zero, try_tcache);
else { else {
if (usize <= arena_maxclass) { if (usize <= arena_maxclass) {
ret = arena_palloc(choose_arena(NULL), usize, alignment, ret = arena_palloc(choose_arena(arena), usize,
zero); alignment, zero);
} else if (alignment <= chunksize) } else if (alignment <= chunksize)
ret = huge_malloc(usize, zero); ret = huge_malloc(usize, zero);
else else
@ -771,6 +818,13 @@ ipalloc(size_t usize, size_t alignment, bool zero)
return (ret); return (ret);
} }
JEMALLOC_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
return (ipallocx(usize, alignment, zero, true, NULL));
}
/* /*
* Typical usage: * Typical usage:
* void *ptr = [...] * void *ptr = [...]
@ -829,7 +883,7 @@ p2rz(const void *ptr)
} }
JEMALLOC_INLINE void JEMALLOC_INLINE void
idalloc(void *ptr) idallocx(void *ptr, bool try_tcache)
{ {
arena_chunk_t *chunk; arena_chunk_t *chunk;
@ -837,24 +891,38 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) if (chunk != ptr)
arena_dalloc(chunk->arena, chunk, ptr, true); arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else else
huge_dalloc(ptr, true); huge_dalloc(ptr, true);
} }
JEMALLOC_INLINE void
idalloc(void *ptr)
{
idallocx(ptr, true);
}
JEMALLOC_INLINE void
iqallocx(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
idallocx(ptr, try_tcache);
}
JEMALLOC_INLINE void JEMALLOC_INLINE void
iqalloc(void *ptr) iqalloc(void *ptr)
{ {
if (config_fill && opt_quarantine) iqallocx(ptr, true);
quarantine(ptr);
else
idalloc(ptr);
} }
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move) bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{ {
void *ret; void *ret;
size_t oldsize; size_t oldsize;
@ -877,7 +945,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size + extra, alignment); usize = sa2u(size + extra, alignment);
if (usize == 0) if (usize == 0)
return (NULL); return (NULL);
ret = ipalloc(usize, alignment, zero); ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
if (ret == NULL) { if (ret == NULL) {
if (extra == 0) if (extra == 0)
return (NULL); return (NULL);
@ -885,7 +953,8 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size, alignment); usize = sa2u(size, alignment);
if (usize == 0) if (usize == 0)
return (NULL); return (NULL);
ret = ipalloc(usize, alignment, zero); ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
arena);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
} }
@ -896,7 +965,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/ */
copysize = (size < oldsize) ? size : oldsize; copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
iqalloc(ptr); iqallocx(ptr, try_tcache_dalloc);
return (ret); return (ret);
} }
@ -910,15 +979,25 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
} }
} else { } else {
if (size + extra <= arena_maxclass) { if (size + extra <= arena_maxclass) {
return (arena_ralloc(ptr, oldsize, size, extra, return (arena_ralloc(arena, ptr, oldsize, size, extra,
alignment, zero, true)); alignment, zero, try_tcache_alloc,
try_tcache_dalloc));
} else { } else {
return (huge_ralloc(ptr, oldsize, size, extra, return (huge_ralloc(ptr, oldsize, size, extra,
alignment, zero)); alignment, zero, try_tcache_dalloc));
} }
} }
} }
JEMALLOC_INLINE void *
iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
bool no_move)
{
return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
NULL));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t) malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t, malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)

View File

@ -12,6 +12,8 @@
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) #define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) #define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc) #define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large) #define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small) #define arena_malloc_small JEMALLOC_N(arena_malloc_small)
@ -51,14 +53,13 @@
#define arena_stats_merge JEMALLOC_N(arena_stats_merge) #define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) #define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas) #define arenas JEMALLOC_N(arenas)
#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_booted JEMALLOC_N(arenas_booted) #define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup) #define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend) #define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized) #define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock) #define arenas_lock JEMALLOC_N(arenas_lock)
#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define arenas_tls JEMALLOC_N(arenas_tls) #define arenas_tls JEMALLOC_N(arenas_tls)
#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) #define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper) #define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) #define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
@ -101,9 +102,15 @@
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) #define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) #define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) #define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss) #define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages) #define chunk_npages JEMALLOC_N(chunk_npages)
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx) #define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree) #define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize) #define chunksize JEMALLOC_N(chunksize)
@ -129,6 +136,10 @@
#define ctl_bymib JEMALLOC_N(ctl_bymib) #define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname) #define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib) #define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) #define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) #define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) #define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@ -161,6 +172,7 @@
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) #define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) #define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) #define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash) #define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated) #define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot) #define huge_boot JEMALLOC_N(huge_boot)
@ -180,11 +192,17 @@
#define huge_salloc JEMALLOC_N(huge_salloc) #define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm) #define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc) #define icalloc JEMALLOC_N(icalloc)
#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc) #define idalloc JEMALLOC_N(idalloc)
#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc) #define imalloc JEMALLOC_N(imalloc)
#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc) #define ipalloc JEMALLOC_N(ipalloc)
#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc) #define iqalloc JEMALLOC_N(iqalloc)
#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc) #define iralloc JEMALLOC_N(iralloc)
#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc) #define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded) #define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc) #define ivsalloc JEMALLOC_N(ivsalloc)
@ -212,7 +230,9 @@
#define map_bias JEMALLOC_N(map_bias) #define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write) #define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot) #define mutex_boot JEMALLOC_N(mutex_boot)
#define narenas JEMALLOC_N(narenas) #define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_total JEMALLOC_N(narenas_total)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus) #define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins) #define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort) #define opt_abort JEMALLOC_N(opt_abort)
@ -254,6 +274,9 @@
#define prof_lookup JEMALLOC_N(prof_lookup) #define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc) #define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump) #define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote) #define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc) #define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) #define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@ -264,6 +287,7 @@
#define prof_tdata_init JEMALLOC_N(prof_tdata_init) #define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) #define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) #define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) #define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) #define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) #define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
@ -278,12 +302,13 @@
#define rtree_get JEMALLOC_N(rtree_get) #define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked) #define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new) #define rtree_new JEMALLOC_N(rtree_new)
#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set) #define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u) #define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u) #define sa2u JEMALLOC_N(sa2u)
#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index) #define set_errno JEMALLOC_N(set_errno)
#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
#define stats_cactive JEMALLOC_N(stats_cactive) #define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add) #define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get) #define stats_cactive_get JEMALLOC_N(stats_cactive_get)
@ -311,6 +336,7 @@
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) #define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) #define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) #define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) #define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) #define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) #define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
@ -325,6 +351,7 @@
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) #define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) #define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls) #define tcache_tls JEMALLOC_N(tcache_tls)
#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) #define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) #define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) #define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
@ -332,6 +359,7 @@
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) #define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) #define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) #define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) #define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) #define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) #define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)

View File

@@ -223,6 +223,9 @@ void	prof_tdata_cleanup(void *arg);
 void	prof_boot0(void);
 void	prof_boot1(void);
 bool	prof_boot2(void);
+void	prof_prefork(void);
+void	prof_postfork_parent(void);
+void	prof_postfork_child(void);

 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -506,7 +509,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
		if ((uintptr_t)cnt > (uintptr_t)1U) {
			prof_ctx_set(ptr, cnt->ctx);
			cnt->epoch++;
-		} else
+		} else if (ptr != NULL)
			prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
		/*********/
		mb_write();

View File

@@ -36,6 +36,9 @@ struct rtree_s {
 #ifdef JEMALLOC_H_EXTERNS

 rtree_t	*rtree_new(unsigned bits);
+void	rtree_prefork(rtree_t *rtree);
+void	rtree_postfork_parent(rtree_t *rtree);
+void	rtree_postfork_child(rtree_t *rtree);

 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/

View File

@@ -25,6 +25,8 @@ extern "C" {
 #endif
 #define	ALLOCM_ZERO	((int)0x40)
 #define	ALLOCM_NO_MOVE	((int)0x80)
+/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
+#define	ALLOCM_ARENA(a)	((int)(((a)+1) << 8))

 #define	ALLOCM_SUCCESS		0
 #define	ALLOCM_ERR_OOM		1
@@ -59,7 +61,8 @@ JEMALLOC_EXPORT void *	je_memalign(size_t alignment, size_t size)
 JEMALLOC_EXPORT void *	je_valloc(size_t size) JEMALLOC_ATTR(malloc);
 #endif

-JEMALLOC_EXPORT size_t	je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT size_t	je_malloc_usable_size(
+    JEMALLOC_USABLE_SIZE_CONST void *ptr);
 JEMALLOC_EXPORT void	je_malloc_stats_print(void (*write_cb)(void *,
     const char *), void *je_cbopaque, const char *opts);
 JEMALLOC_EXPORT int	je_mallctl(const char *name, void *oldp,
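The +1 bias in ALLOCM_ARENA() leaves flag bits 8 and up equal to zero when no arena is requested, so an implementation can recover the index with the inverse transformation sketched below; this is a hypothetical helper, not part of the public API:

#include <stdbool.h>

/*
 * Hypothetical inverse of ALLOCM_ARENA(): recover the arena index from
 * allocm() flags; return false when no arena was specified.
 */
static bool
allocm_arena_get(int flags, unsigned *arena_ind)
{
	unsigned biased = (unsigned)flags >> 8;

	if (biased == 0)
		return (false);
	*arena_ind = biased - 1;
	return (true);
}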

View File

@@ -221,6 +221,15 @@
 #undef JEMALLOC_OVERRIDE_MEMALIGN
 #undef JEMALLOC_OVERRIDE_VALLOC

+/*
+ * At least Linux omits the "const" in:
+ *
+ *   size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#undef JEMALLOC_USABLE_SIZE_CONST
+
 /*
  * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
  */

View File

@ -372,7 +372,7 @@ arena_chunk_alloc(arena_t *arena)
zero = false; zero = false;
malloc_mutex_unlock(&arena->lock); malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
false, &zero); false, &zero, arena->dss_prec);
malloc_mutex_lock(&arena->lock); malloc_mutex_lock(&arena->lock);
if (chunk == NULL) if (chunk == NULL)
return (NULL); return (NULL);
@ -551,24 +551,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{ {
ql_head(arena_chunk_map_t) mapelms; ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm; arena_chunk_map_t *mapelm;
size_t pageind, flag_unzeroed; size_t pageind;
size_t ndirty; size_t ndirty;
size_t nmadvise; size_t nmadvise;
ql_new(&mapelms); ql_new(&mapelms);
flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
/*
* madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
* mappings, but not for file-backed mappings.
*/
0
#else
CHUNK_MAP_UNZEROED
#endif
;
/* /*
* If chunk is the spare, temporarily re-allocate it, 1) so that its * If chunk is the spare, temporarily re-allocate it, 1) so that its
* run is reinserted into runs_avail_dirty, and 2) so that it cannot be * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
@ -603,26 +591,12 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
assert(arena_mapbits_dirty_get(chunk, pageind) == assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1)); arena_mapbits_dirty_get(chunk, pageind+npages-1));
if (arena_mapbits_dirty_get(chunk, pageind) != 0) { if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
size_t i;
arena_avail_tree_remove( arena_avail_tree_remove(
&arena->runs_avail_dirty, mapelm); &arena->runs_avail_dirty, mapelm);
arena_mapbits_unzeroed_set(chunk, pageind,
flag_unzeroed);
arena_mapbits_large_set(chunk, pageind, arena_mapbits_large_set(chunk, pageind,
(npages << LG_PAGE), 0); (npages << LG_PAGE), 0);
/*
* Update internal elements in the page map, so
* that CHUNK_MAP_UNZEROED is properly set.
*/
for (i = 1; i < npages - 1; i++) {
arena_mapbits_unzeroed_set(chunk,
pageind+i, flag_unzeroed);
}
if (npages > 1) { if (npages > 1) {
arena_mapbits_unzeroed_set(chunk,
pageind+npages-1, flag_unzeroed);
arena_mapbits_large_set(chunk, arena_mapbits_large_set(chunk,
pageind+npages-1, 0, 0); pageind+npages-1, 0, 0);
} }
@ -685,14 +659,30 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
sizeof(arena_chunk_map_t)) + map_bias; sizeof(arena_chunk_map_t)) + map_bias;
size_t npages = arena_mapbits_large_size_get(chunk, pageind) >> size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE; LG_PAGE;
bool unzeroed;
size_t flag_unzeroed, i;
assert(pageind + npages <= chunk_npages); assert(pageind + npages <= chunk_npages);
assert(ndirty >= npages); assert(ndirty >= npages);
if (config_debug) if (config_debug)
ndirty -= npages; ndirty -= npages;
unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)), LG_PAGE)), (npages << LG_PAGE));
(npages << LG_PAGE)); flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
/*
* Set the unzeroed flag for all pages, now that pages_purge()
* has returned whether the pages were zeroed as a side effect
* of purging. This chunk map modification is safe even though
* the arena mutex isn't currently owned by this thread,
* because the run is marked as allocated, thus protecting it
* from being modified by any other thread. As long as these
* writes don't perturb the first and last elements'
* CHUNK_MAP_ALLOCATED bits, behavior is well defined.
*/
for (i = 0; i < npages; i++) {
arena_mapbits_unzeroed_set(chunk, pageind+i,
flag_unzeroed);
}
if (config_stats) if (config_stats)
nmadvise++; nmadvise++;
} }
@ -1629,52 +1619,6 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
mapelm = arena_mapp_get(chunk, pageind); mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
} }
void
arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*nactive += arena->nactive;
*ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
if (config_tcache) {
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
}
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock);
}
}
void void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@ -1887,8 +1831,9 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
} }
void * void *
arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, bool try_tcache) size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
bool try_tcache_dalloc)
{ {
void *ret; void *ret;
size_t copysize; size_t copysize;
@ -1907,9 +1852,9 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t usize = sa2u(size + extra, alignment); size_t usize = sa2u(size + extra, alignment);
if (usize == 0) if (usize == 0)
return (NULL); return (NULL);
ret = ipalloc(usize, alignment, zero); ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
} else } else
ret = arena_malloc(NULL, size + extra, zero, try_tcache); ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
if (ret == NULL) { if (ret == NULL) {
if (extra == 0) if (extra == 0)
@ -1919,9 +1864,10 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t usize = sa2u(size, alignment); size_t usize = sa2u(size, alignment);
if (usize == 0) if (usize == 0)
return (NULL); return (NULL);
ret = ipalloc(usize, alignment, zero); ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
arena);
} else } else
ret = arena_malloc(NULL, size, zero, try_tcache); ret = arena_malloc(arena, size, zero, try_tcache_alloc);
if (ret == NULL) if (ret == NULL)
return (NULL); return (NULL);
@ -1936,10 +1882,78 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
copysize = (size < oldsize) ? size : oldsize; copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
iqalloc(ptr); iqallocx(ptr, try_tcache_dalloc);
return (ret); return (ret);
} }
dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
dss_prec_t ret;
malloc_mutex_lock(&arena->lock);
ret = arena->dss_prec;
malloc_mutex_unlock(&arena->lock);
return (ret);
}
void
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{
malloc_mutex_lock(&arena->lock);
arena->dss_prec = dss_prec;
malloc_mutex_unlock(&arena->lock);
}
void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
malloc_large_stats_t *lstats)
{
unsigned i;
malloc_mutex_lock(&arena->lock);
*dss = dss_prec_names[arena->dss_prec];
*nactive += arena->nactive;
*ndirty += arena->ndirty;
astats->mapped += arena->stats.mapped;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
astats->allocated_large += arena->stats.allocated_large;
astats->nmalloc_large += arena->stats.nmalloc_large;
astats->ndalloc_large += arena->stats.ndalloc_large;
astats->nrequests_large += arena->stats.nrequests_large;
for (i = 0; i < nlclasses; i++) {
lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
lstats[i].nrequests += arena->stats.lstats[i].nrequests;
lstats[i].curruns += arena->stats.lstats[i].curruns;
}
malloc_mutex_unlock(&arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bstats[i].allocated += bin->stats.allocated;
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
if (config_tcache) {
bstats[i].nfills += bin->stats.nfills;
bstats[i].nflushes += bin->stats.nflushes;
}
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
malloc_mutex_unlock(&bin->lock);
}
}
bool bool
arena_new(arena_t *arena, unsigned ind) arena_new(arena_t *arena, unsigned ind)
{ {
@ -1968,6 +1982,8 @@ arena_new(arena_t *arena, unsigned ind)
if (config_prof) if (config_prof)
arena->prof_accumbytes = 0; arena->prof_accumbytes = 0;
arena->dss_prec = chunk_dss_prec_get();
/* Initialize chunks. */ /* Initialize chunks. */
ql_new(&arena->chunks_dirty); ql_new(&arena->chunks_dirty);
arena->spare = NULL; arena->spare = NULL;
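
The new arena_dss_prec_get()/arena_dss_prec_set() pair above is the backing for the per-arena dss controls wired up in the src/ctl.c hunk below. As an illustrative sketch only (not part of this diff), an application could read an arena's current dss precedence and switch it in a single mallctl() call; the fixed index 0, the helper name, and the bare-bones error handling are placeholders, and the unprefixed API names assume a default build.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Illustrative sketch: read arena 0's current dss precedence and switch it to
 * "primary" in one mallctl() call.  "arena.0.dss" is the mallctl added in this
 * diff; everything else here is placeholder application code.
 */
static int
arena0_prefer_dss(void)
{
	const char *old, *prec = "primary";
	size_t sz = sizeof(old);

	if (mallctl("arena.0.dss", &old, &sz, &prec, sizeof(prec)) != 0)
		return (1);
	printf("arena 0 dss precedence: %s -> %s\n", old, prec);
	return (0);
}

Writing the same way to index narenas (one past the highest arena index, which arena_i_index() in the ctl.c hunk accepts) goes through chunk_dss_prec_set() and changes the process-wide default instead, while "stats.arenas.<i>.dss" reports the per-arena setting that arena_stats_merge() above now exports.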


@ -32,7 +32,8 @@ base_pages_alloc(size_t minsize)
assert(minsize != 0); assert(minsize != 0);
csize = CHUNK_CEILING(minsize); csize = CHUNK_CEILING(minsize);
zero = false; zero = false;
base_pages = chunk_alloc(csize, chunksize, true, &zero); base_pages = chunk_alloc(csize, chunksize, true, &zero,
chunk_dss_prec_get());
if (base_pages == NULL) if (base_pages == NULL)
return (true); return (true);
base_next_addr = base_pages; base_next_addr = base_pages;


@ -4,7 +4,8 @@
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
size_t opt_lg_chunk = LG_CHUNK_DEFAULT; const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
malloc_mutex_t chunks_mtx; malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks; chunk_stats_t stats_chunks;
@ -15,8 +16,10 @@ chunk_stats_t stats_chunks;
* address space. Depending on function, different tree orderings are needed, * address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents. * which is why there are two trees with the same contents.
*/ */
static extent_tree_t chunks_szad; static extent_tree_t chunks_szad_mmap;
static extent_tree_t chunks_ad; static extent_tree_t chunks_ad_mmap;
static extent_tree_t chunks_szad_dss;
static extent_tree_t chunks_ad_dss;
rtree_t *chunks_rtree; rtree_t *chunks_rtree;
@ -30,19 +33,23 @@ size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/ /******************************************************************************/
/* Function prototypes for non-inline static functions. */ /* Function prototypes for non-inline static functions. */
static void *chunk_recycle(size_t size, size_t alignment, bool base, static void *chunk_recycle(extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
bool *zero); bool *zero);
static void chunk_record(void *chunk, size_t size); static void chunk_record(extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, void *chunk, size_t size);
/******************************************************************************/ /******************************************************************************/
static void * static void *
chunk_recycle(size_t size, size_t alignment, bool base, bool *zero) chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
size_t alignment, bool base, bool *zero)
{ {
void *ret; void *ret;
extent_node_t *node; extent_node_t *node;
extent_node_t key; extent_node_t key;
size_t alloc_size, leadsize, trailsize; size_t alloc_size, leadsize, trailsize;
bool zeroed;
if (base) { if (base) {
/* /*
@ -61,7 +68,7 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
key.addr = NULL; key.addr = NULL;
key.size = alloc_size; key.size = alloc_size;
malloc_mutex_lock(&chunks_mtx); malloc_mutex_lock(&chunks_mtx);
node = extent_tree_szad_nsearch(&chunks_szad, &key); node = extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL) { if (node == NULL) {
malloc_mutex_unlock(&chunks_mtx); malloc_mutex_unlock(&chunks_mtx);
return (NULL); return (NULL);
@ -72,13 +79,13 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
trailsize = node->size - leadsize - size; trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize); ret = (void *)((uintptr_t)node->addr + leadsize);
/* Remove node from the tree. */ /* Remove node from the tree. */
extent_tree_szad_remove(&chunks_szad, node); extent_tree_szad_remove(chunks_szad, node);
extent_tree_ad_remove(&chunks_ad, node); extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) { if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */ /* Insert the leading space as a smaller chunk. */
node->size = leadsize; node->size = leadsize;
extent_tree_szad_insert(&chunks_szad, node); extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(&chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
node = NULL; node = NULL;
} }
if (trailsize != 0) { if (trailsize != 0) {
@ -101,23 +108,24 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
} }
node->addr = (void *)((uintptr_t)(ret) + size); node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize; node->size = trailsize;
extent_tree_szad_insert(&chunks_szad, node); extent_tree_szad_insert(chunks_szad, node);
extent_tree_ad_insert(&chunks_ad, node); extent_tree_ad_insert(chunks_ad, node);
node = NULL; node = NULL;
} }
malloc_mutex_unlock(&chunks_mtx); malloc_mutex_unlock(&chunks_mtx);
if (node != NULL) zeroed = false;
if (node != NULL) {
if (node->zeroed) {
zeroed = true;
*zero = true;
}
base_node_dealloc(node); base_node_dealloc(node);
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED }
/* Pages are zeroed as a side effect of pages_purge(). */ if (zeroed == false && *zero) {
*zero = true;
#else
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size); VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size); memset(ret, 0, size);
} }
#endif
return (ret); return (ret);
} }
@ -128,7 +136,8 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
* advantage of them if they are returned. * advantage of them if they are returned.
*/ */
void * void *
chunk_alloc(size_t size, size_t alignment, bool base, bool *zero) chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
dss_prec_t dss_prec)
{ {
void *ret; void *ret;
@ -137,19 +146,40 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
assert(alignment != 0); assert(alignment != 0);
assert((alignment & chunksize_mask) == 0); assert((alignment & chunksize_mask) == 0);
ret = chunk_recycle(size, alignment, base, zero); /*
if (ret != NULL) * Try to recycle an existing mapping.
*/
/* "primary" dss. */
if (config_dss && dss_prec == dss_prec_primary && (ret =
chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, alignment,
base, zero)) != NULL)
goto label_return;
/* mmap. */
if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
alignment, base, zero)) != NULL)
goto label_return;
/* "secondary" dss. */
if (config_dss && dss_prec == dss_prec_secondary && (ret =
chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, alignment,
base, zero)) != NULL)
goto label_return; goto label_return;
ret = chunk_alloc_mmap(size, alignment, zero); /*
if (ret != NULL) * Try to allocate a new mapping.
goto label_return; */
if (config_dss) { /* "primary" dss. */
ret = chunk_alloc_dss(size, alignment, zero); if (config_dss && dss_prec == dss_prec_primary && (ret =
if (ret != NULL) chunk_alloc_dss(size, alignment, zero)) != NULL)
goto label_return; goto label_return;
} /* mmap. */
if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
goto label_return;
/* "secondary" dss. */
if (config_dss && dss_prec == dss_prec_secondary && (ret =
chunk_alloc_dss(size, alignment, zero)) != NULL)
goto label_return;
/* All strategies for allocation failed. */ /* All strategies for allocation failed. */
ret = NULL; ret = NULL;
@ -189,11 +219,13 @@ label_return:
} }
static void static void
chunk_record(void *chunk, size_t size) chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{ {
bool unzeroed;
extent_node_t *xnode, *node, *prev, key; extent_node_t *xnode, *node, *prev, key;
pages_purge(chunk, size); unzeroed = pages_purge(chunk, size);
/* /*
* Allocate a node before acquiring chunks_mtx even though it might not * Allocate a node before acquiring chunks_mtx even though it might not
@ -205,7 +237,7 @@ chunk_record(void *chunk, size_t size)
malloc_mutex_lock(&chunks_mtx); malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size); key.addr = (void *)((uintptr_t)chunk + size);
node = extent_tree_ad_nsearch(&chunks_ad, &key); node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */ /* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) { if (node != NULL && node->addr == key.addr) {
/* /*
@ -213,10 +245,11 @@ chunk_record(void *chunk, size_t size)
* not change the position within chunks_ad, so only * not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad. * remove/insert from/into chunks_szad.
*/ */
extent_tree_szad_remove(&chunks_szad, node); extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk; node->addr = chunk;
node->size += size; node->size += size;
extent_tree_szad_insert(&chunks_szad, node); node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
if (xnode != NULL) if (xnode != NULL)
base_node_dealloc(xnode); base_node_dealloc(xnode);
} else { } else {
@ -234,12 +267,13 @@ chunk_record(void *chunk, size_t size)
node = xnode; node = xnode;
node->addr = chunk; node->addr = chunk;
node->size = size; node->size = size;
extent_tree_ad_insert(&chunks_ad, node); node->zeroed = (unzeroed == false);
extent_tree_szad_insert(&chunks_szad, node); extent_tree_ad_insert(chunks_ad, node);
extent_tree_szad_insert(chunks_szad, node);
} }
/* Try to coalesce backward. */ /* Try to coalesce backward. */
prev = extent_tree_ad_prev(&chunks_ad, node); prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) { chunk) {
/* /*
@ -247,19 +281,34 @@ chunk_record(void *chunk, size_t size)
* not change the position within chunks_ad, so only * not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad. * remove/insert node from/into chunks_szad.
*/ */
extent_tree_szad_remove(&chunks_szad, prev); extent_tree_szad_remove(chunks_szad, prev);
extent_tree_ad_remove(&chunks_ad, prev); extent_tree_ad_remove(chunks_ad, prev);
extent_tree_szad_remove(&chunks_szad, node); extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr; node->addr = prev->addr;
node->size += prev->size; node->size += prev->size;
extent_tree_szad_insert(&chunks_szad, node); node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
base_node_dealloc(prev); base_node_dealloc(prev);
} }
malloc_mutex_unlock(&chunks_mtx); malloc_mutex_unlock(&chunks_mtx);
} }
void
chunk_unmap(void *chunk, size_t size)
{
assert(chunk != NULL);
assert(CHUNK_ADDR2BASE(chunk) == chunk);
assert(size != 0);
assert((size & chunksize_mask) == 0);
if (config_dss && chunk_in_dss(chunk))
chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
else if (chunk_dealloc_mmap(chunk, size))
chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}
void void
chunk_dealloc(void *chunk, size_t size, bool unmap) chunk_dealloc(void *chunk, size_t size, bool unmap)
{ {
@ -273,15 +322,13 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL); rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) { if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx); malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
stats_chunks.curchunks -= (size / chunksize); stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx); malloc_mutex_unlock(&chunks_mtx);
} }
if (unmap) { if (unmap)
if ((config_dss && chunk_in_dss(chunk)) || chunk_unmap(chunk, size);
chunk_dealloc_mmap(chunk, size))
chunk_record(chunk, size);
}
} }
bool bool
@ -301,8 +348,10 @@ chunk_boot(void)
} }
if (config_dss && chunk_dss_boot()) if (config_dss && chunk_dss_boot())
return (true); return (true);
extent_tree_szad_new(&chunks_szad); extent_tree_szad_new(&chunks_szad_mmap);
extent_tree_ad_new(&chunks_ad); extent_tree_ad_new(&chunks_ad_mmap);
extent_tree_szad_new(&chunks_szad_dss);
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) { if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk); opt_lg_chunk);
@ -312,3 +361,33 @@ chunk_boot(void)
return (false); return (false);
} }
void
chunk_prefork(void)
{
malloc_mutex_lock(&chunks_mtx);
if (config_ivsalloc)
rtree_prefork(chunks_rtree);
chunk_dss_prefork();
}
void
chunk_postfork_parent(void)
{
chunk_dss_postfork_parent();
if (config_ivsalloc)
rtree_postfork_parent(chunks_rtree);
malloc_mutex_postfork_parent(&chunks_mtx);
}
void
chunk_postfork_child(void)
{
chunk_dss_postfork_child();
if (config_ivsalloc)
rtree_postfork_child(chunks_rtree);
malloc_mutex_postfork_child(&chunks_mtx);
}
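
chunk_alloc() above now takes a dss_prec_t and uses it to decide whether the dss recycle tree and sbrk() extension are tried before or after mmap(), and chunk_unmap() routes freed chunks back to the matching tree. A minimal, hedged way for an application to exercise the dss-first path is the new "dss" option handled later in this diff; the sketch assumes a default, unprefixed build, where the same option can also be supplied at run time via the MALLOC_CONF environment variable.

/*
 * Illustrative sketch only: make chunk_alloc() prefer sbrk()-backed (dss)
 * chunks over mmap() for the whole process.  The option string is parsed by
 * malloc_conf_init() in the src/jemalloc.c hunk below; "disabled", "primary",
 * and "secondary" are the names listed in dss_prec_names[].
 */
const char *malloc_conf = "dss:primary";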


@ -3,6 +3,16 @@
/******************************************************************************/ /******************************************************************************/
/* Data. */ /* Data. */
const char *dss_prec_names[] = {
"disabled",
"primary",
"secondary",
"N/A"
};
/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/* /*
* Protects sbrk() calls. This avoids malloc races among threads, though it * Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly. * does not protect against races with threads that call sbrk() directly.
@ -29,6 +39,31 @@ sbrk(intptr_t increment)
} }
#endif #endif
dss_prec_t
chunk_dss_prec_get(void)
{
dss_prec_t ret;
if (config_dss == false)
return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
return (ret);
}
bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{
if (config_dss == false)
return (true);
malloc_mutex_lock(&dss_mtx);
dss_prec_default = dss_prec;
malloc_mutex_unlock(&dss_mtx);
return (false);
}
void * void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero) chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{ {
@ -88,7 +123,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
dss_max = dss_next; dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx); malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0) if (cpad_size != 0)
chunk_dealloc(cpad, cpad_size, true); chunk_unmap(cpad, cpad_size);
if (*zero) { if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size); VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size); memset(ret, 0, size);


@ -113,22 +113,30 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
#endif #endif
} }
void bool
pages_purge(void *addr, size_t length) pages_purge(void *addr, size_t length)
{ {
bool unzeroed;
#ifdef _WIN32 #ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE); VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
unzeroed = true;
#else #else
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED # ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED # define JEMALLOC_MADV_PURGE MADV_DONTNEED
# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE) # elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE # define JEMALLOC_MADV_PURGE MADV_FREE
# define JEMALLOC_MADV_ZEROS false
# else # else
# error "No method defined for purging unused dirty pages." # error "No method defined for purging unused dirty pages."
# endif # endif
madvise(addr, length, JEMALLOC_MADV_PURGE); int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
# undef JEMALLOC_MADV_PURGE
# undef JEMALLOC_MADV_ZEROS
#endif #endif
return (unzeroed);
} }
static void * static void *
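
pages_purge() now returns whether the purged range may still hold stale data, and chunk_record()/chunk_recycle() in the src/chunk.c hunk above carry that bit in extent nodes so that zero-requesting callers only memset() when necessary. The reason for the split is a property of the two advice flags; the following is a self-contained sketch of the same decision, with jemalloc's configure-time choice replaced by a compile-time #if purely for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Standalone illustration of the distinction JEMALLOC_MADV_ZEROS encodes
 * above: purge a range and report whether its contents must still be treated
 * as unspecified.  MADV_DONTNEED on Linux makes anonymous pages read back as
 * zeros afterward; MADV_FREE (e.g. on FreeBSD) leaves the old contents in
 * place until the kernel actually reclaims the pages.
 */
static bool
purge_is_unzeroed(void *addr, size_t length)
{
#if defined(MADV_DONTNEED)
	return (madvise(addr, length, MADV_DONTNEED) != 0);
#elif defined(MADV_FREE)
	(void)madvise(addr, length, MADV_FREE);
	return (true);
#else
	(void)addr;
	(void)length;
	return (true);
#endif
}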

src/ctl.c

@ -48,8 +48,8 @@ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen); size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \ #define INDEX_PROTO(n) \
const ctl_named_node_t *n##_index(const size_t *mib, size_t miblen, \ static const ctl_named_node_t *n##_index(const size_t *mib, \
size_t i); size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats); static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats); static void ctl_arena_clear(ctl_arena_stats_t *astats);
@ -58,6 +58,7 @@ static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats); ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i); static void ctl_arena_refresh(arena_t *arena, unsigned i);
static bool ctl_grow(void);
static void ctl_refresh(void); static void ctl_refresh(void);
static bool ctl_init(void); static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp, static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
@ -88,6 +89,7 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind) CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc) CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort) CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas) CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult) CTL_PROTO(opt_lg_dirty_mult)
@ -110,6 +112,10 @@ CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum) CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static int arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs) CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size) CTL_PROTO(arenas_bin_i_run_size)
@ -125,6 +131,7 @@ CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins) CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns) CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge) CTL_PROTO(arenas_purge)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active) CTL_PROTO(prof_active)
CTL_PROTO(prof_dump) CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval) CTL_PROTO(prof_interval)
@ -158,6 +165,7 @@ CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns) CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j) INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads) CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty) CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped) CTL_PROTO(stats_arenas_i_mapped)
@ -223,6 +231,7 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = { static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)}, {NAME("abort"), CTL(opt_abort)},
{NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)}, {NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)}, {NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
@ -247,6 +256,18 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_accum"), CTL(opt_prof_accum)} {NAME("prof_accum"), CTL(opt_prof_accum)}
}; };
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
{NAME("dss"), CTL(arena_i_dss)}
};
static const ctl_named_node_t super_arena_i_node[] = {
{NAME(""), CHILD(named, arena_i)}
};
static const ctl_indexed_node_t arena_node[] = {
{INDEX(arena_i)}
};
static const ctl_named_node_t arenas_bin_i_node[] = { static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)}, {NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)}, {NAME("nregs"), CTL(arenas_bin_i_nregs)},
@ -282,7 +303,8 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("bin"), CHILD(indexed, arenas_bin)}, {NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)}, {NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)}, {NAME("lrun"), CHILD(indexed, arenas_lrun)},
{NAME("purge"), CTL(arenas_purge)} {NAME("purge"), CTL(arenas_purge)},
{NAME("extend"), CTL(arenas_extend)}
}; };
static const ctl_named_node_t prof_node[] = { static const ctl_named_node_t prof_node[] = {
@ -352,6 +374,7 @@ static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
static const ctl_named_node_t stats_arenas_i_node[] = { static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)}, {NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)}, {NAME("mapped"), CTL(stats_arenas_i_mapped)},
@ -387,6 +410,7 @@ static const ctl_named_node_t root_node[] = {
{NAME("thread"), CHILD(named, thread)}, {NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)}, {NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)}, {NAME("opt"), CHILD(named, opt)},
{NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)}, {NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)}, {NAME("prof"), CHILD(named, prof)},
{NAME("stats"), CHILD(named, stats)} {NAME("stats"), CHILD(named, stats)}
@ -420,6 +444,7 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats) ctl_arena_clear(ctl_arena_stats_t *astats)
{ {
astats->dss = dss_prec_names[dss_prec_limit];
astats->pactive = 0; astats->pactive = 0;
astats->pdirty = 0; astats->pdirty = 0;
if (config_stats) { if (config_stats) {
@ -439,8 +464,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{ {
unsigned i; unsigned i;
arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty, arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
&cstats->astats, cstats->bstats, cstats->lstats); &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
for (i = 0; i < NBINS; i++) { for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].allocated; cstats->allocated_small += cstats->bstats[i].allocated;
@ -500,7 +525,7 @@ static void
ctl_arena_refresh(arena_t *arena, unsigned i) ctl_arena_refresh(arena_t *arena, unsigned i)
{ {
ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas]; ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats); ctl_arena_clear(astats);
@ -518,11 +543,72 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
} }
} }
static bool
ctl_grow(void)
{
size_t astats_size;
ctl_arena_stats_t *astats;
arena_t **tarenas;
/* Extend arena stats and arenas arrays. */
astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
if (ctl_stats.narenas == narenas_auto) {
/* ctl_stats.arenas and arenas came from base_alloc(). */
astats = (ctl_arena_stats_t *)imalloc(astats_size);
if (astats == NULL)
return (true);
memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
sizeof(arena_t *));
if (tarenas == NULL) {
idalloc(astats);
return (true);
}
memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
} else {
astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
astats_size, 0, 0, false, false);
if (astats == NULL)
return (true);
tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
sizeof(arena_t *), 0, 0, false, false);
if (tarenas == NULL)
return (true);
}
/* Initialize the new astats and arenas elements. */
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
return (true);
tarenas[ctl_stats.narenas] = NULL;
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
memcpy(&tstats, &astats[ctl_stats.narenas],
sizeof(ctl_arena_stats_t));
memcpy(&astats[ctl_stats.narenas],
&astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
ctl_stats.arenas = astats;
ctl_stats.narenas++;
malloc_mutex_lock(&arenas_lock);
arenas = tarenas;
narenas_total++;
arenas_extend(narenas_total - 1);
malloc_mutex_unlock(&arenas_lock);
return (false);
}
static void static void
ctl_refresh(void) ctl_refresh(void)
{ {
unsigned i; unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, narenas); VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
if (config_stats) { if (config_stats) {
malloc_mutex_lock(&chunks_mtx); malloc_mutex_lock(&chunks_mtx);
@ -542,19 +628,19 @@ ctl_refresh(void)
* Clear sum stats, since they will be merged into by * Clear sum stats, since they will be merged into by
* ctl_arena_refresh(). * ctl_arena_refresh().
*/ */
ctl_stats.arenas[narenas].nthreads = 0; ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
ctl_arena_clear(&ctl_stats.arenas[narenas]); ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
malloc_mutex_lock(&arenas_lock); malloc_mutex_lock(&arenas_lock);
memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
for (i = 0; i < narenas; i++) { for (i = 0; i < ctl_stats.narenas; i++) {
if (arenas[i] != NULL) if (arenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
else else
ctl_stats.arenas[i].nthreads = 0; ctl_stats.arenas[i].nthreads = 0;
} }
malloc_mutex_unlock(&arenas_lock); malloc_mutex_unlock(&arenas_lock);
for (i = 0; i < narenas; i++) { for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL); bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized; ctl_stats.arenas[i].initialized = initialized;
@ -563,11 +649,13 @@ ctl_refresh(void)
} }
if (config_stats) { if (config_stats) {
ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small ctl_stats.allocated =
+ ctl_stats.arenas[narenas].astats.allocated_large ctl_stats.arenas[ctl_stats.narenas].allocated_small
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
+ ctl_stats.huge.allocated;
ctl_stats.active =
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
+ ctl_stats.huge.allocated; + ctl_stats.huge.allocated;
ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
LG_PAGE) + ctl_stats.huge.allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
} }
@ -585,13 +673,15 @@ ctl_init(void)
* Allocate space for one extra arena stats element, which * Allocate space for one extra arena stats element, which
* contains summed stats across all arenas. * contains summed stats across all arenas.
*/ */
assert(narenas_auto == narenas_total_get());
ctl_stats.narenas = narenas_auto;
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
(narenas + 1) * sizeof(ctl_arena_stats_t)); (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) { if (ctl_stats.arenas == NULL) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
memset(ctl_stats.arenas, 0, (narenas + 1) * memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t)); sizeof(ctl_arena_stats_t));
/* /*
@ -601,14 +691,14 @@ ctl_init(void)
*/ */
if (config_stats) { if (config_stats) {
unsigned i; unsigned i;
for (i = 0; i <= narenas; i++) { for (i = 0; i <= ctl_stats.narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) { if (ctl_arena_init(&ctl_stats.arenas[i])) {
ret = true; ret = true;
goto label_return; goto label_return;
} }
} }
} }
ctl_stats.arenas[narenas].initialized = true; ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0; ctl_epoch = 0;
ctl_refresh(); ctl_refresh();
@ -827,6 +917,27 @@ ctl_boot(void)
return (false); return (false);
} }
void
ctl_prefork(void)
{
malloc_mutex_lock(&ctl_mtx);
}
void
ctl_postfork_parent(void)
{
malloc_mutex_postfork_parent(&ctl_mtx);
}
void
ctl_postfork_child(void)
{
malloc_mutex_postfork_child(&ctl_mtx);
}
/******************************************************************************/ /******************************************************************************/
/* *_ctl() functions. */ /* *_ctl() functions. */
@ -1032,8 +1143,8 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
} }
READ(oldval, bool); READ(oldval, bool);
label_return:
ret = 0; ret = 0;
label_return:
return (ret); return (ret);
} }
@ -1063,13 +1174,14 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret; int ret;
unsigned newind, oldind; unsigned newind, oldind;
malloc_mutex_lock(&ctl_mtx);
newind = oldind = choose_arena(NULL)->ind; newind = oldind = choose_arena(NULL)->ind;
WRITE(newind, unsigned); WRITE(newind, unsigned);
READ(oldind, unsigned); READ(oldind, unsigned);
if (newind != oldind) { if (newind != oldind) {
arena_t *arena; arena_t *arena;
if (newind >= narenas) { if (newind >= ctl_stats.narenas) {
/* New arena index is out of range. */ /* New arena index is out of range. */
ret = EFAULT; ret = EFAULT;
goto label_return; goto label_return;
@ -1102,6 +1214,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
@ -1135,6 +1248,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/ /******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool) CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
@ -1158,12 +1272,128 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
/******************************************************************************/
static int
arena_purge(unsigned arena_ind)
{
int ret;
malloc_mutex_lock(&ctl_mtx);
{
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
malloc_mutex_lock(&arenas_lock);
memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
malloc_mutex_unlock(&arenas_lock);
if (arena_ind == ctl_stats.narenas) {
unsigned i;
for (i = 0; i < ctl_stats.narenas; i++) {
if (tarenas[i] != NULL)
arena_purge_all(tarenas[i]);
}
} else {
assert(arena_ind < ctl_stats.narenas);
if (tarenas[arena_ind] != NULL)
arena_purge_all(tarenas[arena_ind]);
}
}
ret = 0;
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
ret = arena_purge(mib[1]);
label_return:
return (ret);
}
static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret, i;
bool match, err;
const char *dss;
unsigned arena_ind = mib[1];
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
malloc_mutex_lock(&ctl_mtx);
WRITE(dss, const char *);
match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strcmp(dss_prec_names[i], dss) == 0) {
dss_prec = i;
match = true;
break;
}
}
if (match == false) {
ret = EINVAL;
goto label_return;
}
if (arena_ind < ctl_stats.narenas) {
arena_t *arena = arenas[arena_ind];
if (arena != NULL) {
dss_prec_old = arena_dss_prec_get(arena);
arena_dss_prec_set(arena, dss_prec);
err = false;
} else
err = true;
} else {
dss_prec_old = chunk_dss_prec_get();
err = chunk_dss_prec_set(dss_prec);
}
dss = dss_prec_names[dss_prec_old];
READ(dss, const char *);
if (err) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
if (i > ctl_stats.narenas) {
ret = NULL;
goto label_return;
}
ret = super_arena_i_node;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/ /******************************************************************************/
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
const ctl_named_node_t * static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
@ -1173,7 +1403,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
} }
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
const ctl_named_node_t * static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
@ -1182,7 +1412,27 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node); return (super_arenas_lrun_i_node);
} }
CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned) static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
goto label_return;
}
narenas = ctl_stats.narenas;
READ(narenas, unsigned);
ret = 0;
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@ -1193,13 +1443,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(&ctl_mtx);
READONLY(); READONLY();
if (*oldlenp != narenas * sizeof(bool)) { if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL; ret = EINVAL;
nread = (*oldlenp < narenas * sizeof(bool)) nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
? (*oldlenp / sizeof(bool)) : narenas; ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else { } else {
ret = 0; ret = 0;
nread = narenas; nread = ctl_stats.narenas;
} }
for (i = 0; i < nread; i++) for (i = 0; i < nread; i++)
@ -1222,36 +1472,42 @@ arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) void *newp, size_t newlen)
{ {
int ret; int ret;
unsigned arena; unsigned arena_ind;
malloc_mutex_lock(&ctl_mtx);
WRITEONLY(); WRITEONLY();
arena = UINT_MAX; arena_ind = UINT_MAX;
WRITE(arena, unsigned); WRITE(arena_ind, unsigned);
if (newp != NULL && arena >= narenas) { if (newp != NULL && arena_ind >= ctl_stats.narenas)
ret = EFAULT; ret = EFAULT;
goto label_return; else {
} else { if (arena_ind == UINT_MAX)
VARIABLE_ARRAY(arena_t *, tarenas, narenas); arena_ind = ctl_stats.narenas;
ret = arena_purge(arena_ind);
malloc_mutex_lock(&arenas_lock);
memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
malloc_mutex_unlock(&arenas_lock);
if (arena == UINT_MAX) {
unsigned i;
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
arena_purge_all(tarenas[i]);
}
} else {
assert(arena < narenas);
if (tarenas[arena] != NULL)
arena_purge_all(tarenas[arena]);
}
} }
label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
malloc_mutex_lock(&ctl_mtx);
READONLY();
if (ctl_grow()) {
ret = EAGAIN;
goto label_return;
}
READ(ctl_stats.narenas - 1, unsigned);
ret = 0; ret = 0;
label_return: label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret); return (ret);
} }
@ -1356,7 +1612,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{ {
@ -1374,7 +1630,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{ {
@ -1384,6 +1640,7 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
} }
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
@ -1395,13 +1652,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
CTL_RO_CGEN(config_stats, stats_arenas_i_purged, CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t) ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
const ctl_named_node_t * static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{ {
const ctl_named_node_t * ret; const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx); malloc_mutex_lock(&ctl_mtx);
if (ctl_stats.arenas[i].initialized == false) { if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
ret = NULL; ret = NULL;
goto label_return; goto label_return;
} }

View File

@ -48,7 +48,8 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below. * it is possible to make correct junk/zero fill decisions below.
*/ */
is_zeroed = zero; is_zeroed = zero;
ret = chunk_alloc(csize, alignment, false, &is_zeroed); ret = chunk_alloc(csize, alignment, false, &is_zeroed,
chunk_dss_prec_get());
if (ret == NULL) { if (ret == NULL) {
base_node_dealloc(node); base_node_dealloc(node);
return (NULL); return (NULL);
@ -101,7 +102,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
void * void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero) size_t alignment, bool zero, bool try_tcache_dalloc)
{ {
void *ret; void *ret;
size_t copysize; size_t copysize;
@ -180,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif #endif
{ {
memcpy(ret, ptr, copysize); memcpy(ret, ptr, copysize);
iqalloc(ptr); iqallocx(ptr, try_tcache_dalloc);
} }
return (ret); return (ret);
} }


@ -33,7 +33,8 @@ unsigned ncpus;
malloc_mutex_t arenas_lock; malloc_mutex_t arenas_lock;
arena_t **arenas; arena_t **arenas;
unsigned narenas; unsigned narenas_total;
unsigned narenas_auto;
/* Set to true once the allocator has been initialized. */ /* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false; static bool malloc_initialized = false;
@ -144,14 +145,14 @@ choose_arena_hard(void)
{ {
arena_t *ret; arena_t *ret;
if (narenas > 1) { if (narenas_auto > 1) {
unsigned i, choose, first_null; unsigned i, choose, first_null;
choose = 0; choose = 0;
first_null = narenas; first_null = narenas_auto;
malloc_mutex_lock(&arenas_lock); malloc_mutex_lock(&arenas_lock);
assert(arenas[0] != NULL); assert(arenas[0] != NULL);
for (i = 1; i < narenas; i++) { for (i = 1; i < narenas_auto; i++) {
if (arenas[i] != NULL) { if (arenas[i] != NULL) {
/* /*
* Choose the first arena that has the lowest * Choose the first arena that has the lowest
@ -160,7 +161,7 @@ choose_arena_hard(void)
if (arenas[i]->nthreads < if (arenas[i]->nthreads <
arenas[choose]->nthreads) arenas[choose]->nthreads)
choose = i; choose = i;
} else if (first_null == narenas) { } else if (first_null == narenas_auto) {
/* /*
* Record the index of the first uninitialized * Record the index of the first uninitialized
* arena, in case all extant arenas are in use. * arena, in case all extant arenas are in use.
@ -174,7 +175,8 @@ choose_arena_hard(void)
} }
} }
if (arenas[choose]->nthreads == 0 || first_null == narenas) { if (arenas[choose]->nthreads == 0
|| first_null == narenas_auto) {
/* /*
* Use an unloaded arena, or the least loaded arena if * Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized. * all arenas are already initialized.
@ -203,7 +205,7 @@ stats_print_atexit(void)
{ {
if (config_tcache && config_stats) { if (config_tcache && config_stats) {
unsigned i; unsigned narenas, i;
/* /*
* Merge stats from extant threads. This is racy, since * Merge stats from extant threads. This is racy, since
@ -212,7 +214,7 @@ stats_print_atexit(void)
* out of date by the time they are reported, if other threads * out of date by the time they are reported, if other threads
* continue to allocate. * continue to allocate.
*/ */
for (i = 0; i < narenas; i++) { for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arenas[i]; arena_t *arena = arenas[i];
if (arena != NULL) { if (arena != NULL) {
tcache_t *tcache; tcache_t *tcache;
@ -254,12 +256,13 @@ malloc_ncpus(void)
result = si.dwNumberOfProcessors; result = si.dwNumberOfProcessors;
#else #else
result = sysconf(_SC_NPROCESSORS_ONLN); result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
if (result == -1) { if (result == -1) {
/* Error. */ /* Error. */
ret = 1; ret = 1;
} } else {
#endif ret = (unsigned)result;
ret = (unsigned)result; }
return (ret); return (ret);
} }
@ -377,6 +380,22 @@ malloc_conf_init(void)
const char *opts, *k, *v; const char *opts, *k, *v;
size_t klen, vlen; size_t klen, vlen;
/*
* Automatically configure valgrind before processing options. The
* valgrind option remains in jemalloc 3.x for compatibility reasons.
*/
if (config_valgrind) {
opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
if (config_fill && opt_valgrind) {
opt_junk = false;
assert(opt_zero == false);
opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
opt_redzone = true;
}
if (config_tcache && opt_valgrind)
opt_tcache = false;
}
for (i = 0; i < 3; i++) { for (i = 0; i < 3; i++) {
/* Get runtime configuration. */ /* Get runtime configuration. */
switch (i) { switch (i) {
@ -537,6 +556,30 @@ malloc_conf_init(void)
*/ */
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1) (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
if (chunk_dss_prec_set(i)) {
malloc_conf_error(
"Error setting dss",
k, klen, v, vlen);
} else {
opt_dss =
dss_prec_names[i];
match = true;
break;
}
}
}
if (match == false) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX) SIZE_T_MAX)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
@ -553,20 +596,7 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_utrace, "utrace") CONF_HANDLE_BOOL(opt_utrace, "utrace")
} }
if (config_valgrind) { if (config_valgrind) {
bool hit; CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
CONF_HANDLE_BOOL_HIT(opt_valgrind,
"valgrind", hit)
if (config_fill && opt_valgrind && hit) {
opt_junk = false;
opt_zero = false;
if (opt_quarantine == 0) {
opt_quarantine =
JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
}
opt_redzone = true;
}
if (hit)
continue;
} }
if (config_xmalloc) { if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
@ -695,9 +725,9 @@ malloc_init_hard(void)
* Create enough scaffolding to allow recursive allocation in * Create enough scaffolding to allow recursive allocation in
* malloc_ncpus(). * malloc_ncpus().
*/ */
narenas = 1; narenas_total = narenas_auto = 1;
arenas = init_arenas; arenas = init_arenas;
memset(arenas, 0, sizeof(arena_t *) * narenas); memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/* /*
* Initialize one arena here. The rest are lazily created in * Initialize one arena here. The rest are lazily created in
@ -755,20 +785,21 @@ malloc_init_hard(void)
else else
opt_narenas = 1; opt_narenas = 1;
} }
narenas = opt_narenas; narenas_auto = opt_narenas;
/* /*
* Make sure that the arenas array can be allocated. In practice, this * Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl * limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits. * machinery will fail to allocate memory at far lower limits.
*/ */
if (narenas > chunksize / sizeof(arena_t *)) { if (narenas_auto > chunksize / sizeof(arena_t *)) {
narenas = chunksize / sizeof(arena_t *); narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas); narenas_auto);
} }
narenas_total = narenas_auto;
/* Allocate and initialize arenas. */ /* Allocate and initialize arenas. */
arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL) { if (arenas == NULL) {
malloc_mutex_unlock(&init_lock); malloc_mutex_unlock(&init_lock);
return (true); return (true);
@ -777,7 +808,7 @@ malloc_init_hard(void)
* Zero the array. In practice, this should always be pre-zeroed, * Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure. * since it was just mmap()ed, but let's be sure.
*/ */
memset(arenas, 0, sizeof(arena_t *) * narenas); memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */ /* Copy the pointer to the one arena that was already initialized. */
arenas[0] = init_arenas[0]; arenas[0] = init_arenas[0];
@ -1262,11 +1293,10 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be * passed an extra argument for the caller return address, which will be
* ignored. * ignored.
*/ */
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free; JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc; JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) = JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
je_realloc; JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
je_memalign; je_memalign;
#endif #endif
@ -1279,7 +1309,7 @@ JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
*/ */
size_t size_t
je_malloc_usable_size(const void *ptr) je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{ {
size_t ret; size_t ret;
@ -1343,18 +1373,19 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
#ifdef JEMALLOC_EXPERIMENTAL #ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void * JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero) iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{ {
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
alignment))); alignment)));
if (alignment != 0) if (alignment != 0)
return (ipalloc(usize, alignment, zero)); return (ipallocx(usize, alignment, zero, try_tcache, arena));
else if (zero) else if (zero)
return (icalloc(usize)); return (icallocx(usize, try_tcache, arena));
else else
return (imalloc(usize)); return (imallocx(usize, try_tcache, arena));
} }
int int
@ -1365,6 +1396,9 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1)); & (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO; bool zero = flags & ALLOCM_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
arena_t *arena;
bool try_tcache;
assert(ptr != NULL); assert(ptr != NULL);
assert(size != 0); assert(size != 0);
@ -1372,6 +1406,14 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
if (malloc_init()) if (malloc_init())
goto label_oom; goto label_oom;
if (arena_ind != UINT_MAX) {
arena = arenas[arena_ind];
try_tcache = false;
} else {
arena = NULL;
try_tcache = true;
}
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (usize == 0) if (usize == 0)
goto label_oom; goto label_oom;
@ -1388,18 +1430,19 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
alignment); alignment);
assert(usize_promoted != 0); assert(usize_promoted != 0);
p = iallocm(usize_promoted, alignment, zero); p = iallocm(usize_promoted, alignment, zero,
try_tcache, arena);
if (p == NULL) if (p == NULL)
goto label_oom; goto label_oom;
arena_prof_promoted(p, usize); arena_prof_promoted(p, usize);
} else { } else {
p = iallocm(usize, alignment, zero); p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL) if (p == NULL)
goto label_oom; goto label_oom;
} }
prof_malloc(p, usize, cnt); prof_malloc(p, usize, cnt);
} else { } else {
p = iallocm(usize, alignment, zero); p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL) if (p == NULL)
goto label_oom; goto label_oom;
} }
@ -1436,6 +1479,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
& (SIZE_T_MAX-1)); & (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO; bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE; bool no_move = flags & ALLOCM_NO_MOVE;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL); assert(ptr != NULL);
assert(*ptr != NULL); assert(*ptr != NULL);
@ -1443,6 +1489,19 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(SIZE_T_MAX - size >= extra); assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER); assert(malloc_initialized || IS_INITIALIZER);
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk;
try_tcache_alloc = true;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
arenas[arena_ind]);
arena = arenas[arena_ind];
} else {
try_tcache_alloc = true;
try_tcache_dalloc = true;
arena = NULL;
}
p = *ptr; p = *ptr;
if (config_prof && opt_prof) { if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt; prof_thr_cnt_t *cnt;
@ -1469,9 +1528,10 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
<= SMALL_MAXCLASS) { <= SMALL_MAXCLASS) {
q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1), size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
alignment, zero, no_move); alignment, zero, no_move, try_tcache_alloc,
try_tcache_dalloc, arena);
if (q == NULL) if (q == NULL)
goto label_err; goto label_err;
if (max_usize < PAGE) { if (max_usize < PAGE) {
@ -1480,7 +1540,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
} else } else
usize = isalloc(q, config_prof); usize = isalloc(q, config_prof);
} else { } else {
q = iralloc(p, size, extra, alignment, zero, no_move); q = irallocx(p, size, extra, alignment, zero, no_move,
try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL) if (q == NULL)
goto label_err; goto label_err;
usize = isalloc(q, config_prof); usize = isalloc(q, config_prof);
@ -1497,7 +1558,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
old_size = isalloc(p, false); old_size = isalloc(p, false);
old_rzsize = u2rz(old_size); old_rzsize = u2rz(old_size);
} }
q = iralloc(p, size, extra, alignment, zero, no_move); q = irallocx(p, size, extra, alignment, zero, no_move,
try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL) if (q == NULL)
goto label_err; goto label_err;
if (config_stats) if (config_stats)
@ -1558,10 +1620,19 @@ je_dallocm(void *ptr, int flags)
{ {
size_t usize; size_t usize;
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache;
assert(ptr != NULL); assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER); assert(malloc_initialized || IS_INITIALIZER);
if (arena_ind != UINT_MAX) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
try_tcache = (chunk == ptr || chunk->arena !=
arenas[arena_ind]);
} else
try_tcache = true;
UTRACE(ptr, 0, 0); UTRACE(ptr, 0, 0);
if (config_stats || config_valgrind) if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof); usize = isalloc(ptr, config_prof);
@ -1574,7 +1645,7 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize; thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind) if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr); rzsize = p2rz(ptr);
iqalloc(ptr); iqallocx(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize); JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS); return (ALLOCM_SUCCESS);
@ -1611,6 +1682,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
 * malloc during fork().
 */

+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator.  Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time.  The following library
+ * constructor is a partial solution to this problem.  It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+    malloc_init();
+}
+
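For reference, the pthread_atfork(3) mechanism that the comment above relies on works roughly as sketched below. This is an illustrative sketch only, not jemalloc's actual code; the lock and function names are hypothetical stand-ins for the prefork/postfork handlers added in this commit.

#include <pthread.h>

/* Hypothetical lock standing in for the allocator's internal mutexes. */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquired before fork() so that no thread holds it across the fork. */
static void prefork(void) { pthread_mutex_lock(&alloc_lock); }

/* Released in the parent once fork() has completed. */
static void postfork_parent(void) { pthread_mutex_unlock(&alloc_lock); }

/* Released in the child, whose copy of the lock would otherwise stay locked. */
static void postfork_child(void) { pthread_mutex_unlock(&alloc_lock); }

/* Registered once during allocator initialization. */
static void register_fork_handlers(void)
{
    pthread_atfork(prefork, postfork_parent, postfork_child);
}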
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
@ -1628,14 +1720,16 @@ _malloc_prefork(void)
    assert(malloc_initialized);

    /* Acquire all mutexes in a safe order. */
+    ctl_prefork();
    malloc_mutex_prefork(&arenas_lock);
-    for (i = 0; i < narenas; i++) {
+    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_prefork(arenas[i]);
    }
+    prof_prefork();
+    chunk_prefork();
    base_prefork();
    huge_prefork();
-    chunk_dss_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@ -1655,14 +1749,16 @@ _malloc_postfork(void)
    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
-    chunk_dss_postfork_parent();
    huge_postfork_parent();
    base_postfork_parent();
-    for (i = 0; i < narenas; i++) {
+    chunk_postfork_parent();
+    prof_postfork_parent();
+    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_postfork_parent(arenas[i]);
    }
    malloc_mutex_postfork_parent(&arenas_lock);
+    ctl_postfork_parent();
}
void
@ -1673,14 +1769,16 @@ jemalloc_postfork_child(void)
    assert(malloc_initialized);

    /* Release all mutexes, now that fork() has completed. */
-    chunk_dss_postfork_child();
    huge_postfork_child();
    base_postfork_child();
-    for (i = 0; i < narenas; i++) {
+    chunk_postfork_child();
+    prof_postfork_child();
+    for (i = 0; i < narenas_total; i++) {
        if (arenas[i] != NULL)
            arena_postfork_child(arenas[i]);
    }
    malloc_mutex_postfork_child(&arenas_lock);
+    ctl_postfork_child();
}

/******************************************************************************/

View File

@ -64,7 +64,7 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/

#ifdef JEMALLOC_MUTEX_INIT_CB
-int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
#endif

View File

@ -1270,4 +1270,46 @@ prof_boot2(void)
    return (false);
}

+void
+prof_prefork(void)
+{
+
+    if (opt_prof) {
+        unsigned i;
+
+        malloc_mutex_lock(&bt2ctx_mtx);
+        malloc_mutex_lock(&prof_dump_seq_mtx);
+        for (i = 0; i < PROF_NCTX_LOCKS; i++)
+            malloc_mutex_lock(&ctx_locks[i]);
+    }
+}
+
+void
+prof_postfork_parent(void)
+{
+
+    if (opt_prof) {
+        unsigned i;
+
+        for (i = 0; i < PROF_NCTX_LOCKS; i++)
+            malloc_mutex_postfork_parent(&ctx_locks[i]);
+        malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+        malloc_mutex_postfork_parent(&bt2ctx_mtx);
+    }
+}
+
+void
+prof_postfork_child(void)
+{
+
+    if (opt_prof) {
+        unsigned i;
+
+        for (i = 0; i < PROF_NCTX_LOCKS; i++)
+            malloc_mutex_postfork_child(&ctx_locks[i]);
+        malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+        malloc_mutex_postfork_child(&bt2ctx_mtx);
+    }
+}
+
/******************************************************************************/

View File

@ -44,3 +44,24 @@ rtree_new(unsigned bits)
    return (ret);
}

+void
+rtree_prefork(rtree_t *rtree)
+{
+
+    malloc_mutex_prefork(&rtree->mutex);
+}
+
+void
+rtree_postfork_parent(rtree_t *rtree)
+{
+
+    malloc_mutex_postfork_parent(&rtree->mutex);
+}
+
+void
+rtree_postfork_child(rtree_t *rtree)
+{
+
+    malloc_mutex_postfork_child(&rtree->mutex);
+}

View File

@ -206,6 +206,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    unsigned i, bool bins, bool large)
{
    unsigned nthreads;
+    const char *dss;
    size_t page, pactive, pdirty, mapped;
    uint64_t npurge, nmadvise, purged;
    size_t small_allocated;
@ -218,6 +219,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
    CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
    malloc_cprintf(write_cb, cbopaque,
        "assigned threads: %u\n", nthreads);
+    CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+    malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
+        dss);
    CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
    CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
    CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
@ -370,6 +374,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
        "Run-time option settings:\n");
    OPT_WRITE_BOOL(abort)
    OPT_WRITE_SIZE_T(lg_chunk)
+    OPT_WRITE_CHAR_P(dss)
    OPT_WRITE_SIZE_T(narenas)
    OPT_WRITE_SSIZE_T(lg_dirty_mult)
    OPT_WRITE_BOOL(stats_print)
@ -400,7 +405,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);

    CTL_GET("arenas.narenas", &uv, unsigned);
-    malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
+    malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);

    malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
        sizeof(void *));
@ -472,7 +477,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    CTL_GET("stats.chunks.current", &chunks_current, size_t);
    malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
        "highchunks curchunks\n");
-    malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
+    malloc_cprintf(write_cb, cbopaque,
+        " %13"PRIu64" %12zu %12zu\n",
        chunks_total, chunks_high, chunks_current);

    /* Print huge stats. */

View File

@ -288,7 +288,7 @@ tcache_create(arena_t *arena)
    else if (size <= tcache_maxclass)
        tcache = (tcache_t *)arena_malloc_large(arena, size, true);
    else
-        tcache = (tcache_t *)icalloc(size);
+        tcache = (tcache_t *)icallocx(size, false, arena);

    if (tcache == NULL)
        return (NULL);
@ -364,7 +364,7 @@ tcache_destroy(tcache_t *tcache)
        arena_dalloc_large(arena, chunk, tcache);
    } else
-        idalloc(tcache);
+        idallocx(tcache, false);
}

void

View File

@ -377,7 +377,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
        case '\0': goto label_out;
        case '%': {
            bool alt_form = false;
-            bool zero_pad = false;
            bool left_justify = false;
            bool plus_space = false;
            bool plus_plus = false;
@ -398,10 +397,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
                assert(alt_form == false);
                alt_form = true;
                break;
-            case '0':
-                assert(zero_pad == false);
-                zero_pad = true;
-                break;
            case '-':
                assert(left_justify == false);
                left_justify = true;

66
test/ALLOCM_ARENA.c Normal file
View File

@ -0,0 +1,66 @@
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"

#define NTHREADS 10

void *
je_thread_start(void *arg)
{
    unsigned thread_ind = (unsigned)(uintptr_t)arg;
    unsigned arena_ind;
    int r;
    void *p;
    size_t rsz, sz;

    sz = sizeof(arena_ind);
    if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0)
        != 0) {
        malloc_printf("Error in arenas.extend\n");
        abort();
    }

    if (thread_ind % 4 != 3) {
        size_t mib[3];
        size_t miblen = sizeof(mib) / sizeof(size_t);
        const char *dss_precs[] = {"disabled", "primary", "secondary"};
        const char *dss = dss_precs[thread_ind % 4];
        if (mallctlnametomib("arena.0.dss", mib, &miblen) != 0) {
            malloc_printf("Error in mallctlnametomib()\n");
            abort();
        }
        mib[1] = arena_ind;
        if (mallctlbymib(mib, miblen, NULL, NULL, (void *)&dss,
            sizeof(const char *))) {
            malloc_printf("Error in mallctlbymib()\n");
            abort();
        }
    }

    r = allocm(&p, &rsz, 1, ALLOCM_ARENA(arena_ind));
    if (r != ALLOCM_SUCCESS) {
        malloc_printf("Unexpected allocm() error\n");
        abort();
    }

    return (NULL);
}

int
main(void)
{
    je_thread_t threads[NTHREADS];
    unsigned i;

    malloc_printf("Test begin\n");

    for (i = 0; i < NTHREADS; i++) {
        je_thread_create(&threads[i], je_thread_start,
            (void *)(uintptr_t)i);
    }

    for (i = 0; i < NTHREADS; i++)
        je_thread_join(threads[i], NULL);

    malloc_printf("Test end\n");
    return (0);
}

2
test/ALLOCM_ARENA.exp Normal file
View File

@ -0,0 +1,2 @@
Test begin
Test end

View File

@ -1,7 +1,7 @@
#define JEMALLOC_MANGLE
#include "jemalloc_test.h"

#define NTHREADS 10

void *
je_thread_start(void *arg)
@ -66,8 +66,10 @@ main(void)
        goto label_return;
    }

-    for (i = 0; i < NTHREADS; i++)
-        je_thread_create(&threads[i], je_thread_start, (void *)&arena_ind);
+    for (i = 0; i < NTHREADS; i++) {
+        je_thread_create(&threads[i], je_thread_start,
+            (void *)&arena_ind);
+    }

    for (i = 0; i < NTHREADS; i++)
        je_thread_join(threads[i], (void *)&ret);