Refactor huge allocation to be managed by arenas.

Refactor huge allocation to be managed by arenas (though the global
red-black tree of huge allocations remains for lookup during
deallocation).  This is the logical conclusion of recent changes that 1)
made per arena dss precedence apply to huge allocation, and 2) made it
possible to replace the per arena chunk allocation/deallocation
functions.

Remove the top level huge stats, and replace them with per arena huge
stats.

Normalize function names and types to *dalloc* (some were *dealloc*).

Remove the --enable-mremap option.  As jemalloc currently operates, this
is a performance regression for some applications, but planned work to
logarithmically space huge size classes should provide similar amortized
performance.  The motivation for this change was that mremap-based huge
reallocation forced leaky abstractions that prevented refactoring.
commit e2deab7a75
parent fb7fe50a88
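Two of the interfaces touched by this commit are directly visible to applications: the chunk hook mallctls, whose "dealloc" spelling becomes "dalloc" (with the matching chunk_dalloc_t type), and the huge statistics, which move from stats.huge.* to stats.arenas.<i>.huge.*. The sketch below shows how a program might exercise both after this change. It is illustrative only: the arena index 0 is arbitrary, it assumes a build without a symbol prefix and with --enable-stats, and error checking of the mallctl() calls is omitted.

#include <stdio.h>
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    chunk_alloc_t *alloc_hook;
    chunk_dalloc_t *dalloc_hook;   /* chunk_dealloc_t no longer exists. */
    size_t hsz, ssz;
    size_t huge_allocated;

    /* Read the current (default) chunk hooks for arena 0. */
    hsz = sizeof(chunk_alloc_t *);
    mallctl("arena.0.chunk.alloc", &alloc_hook, &hsz, NULL, 0);
    hsz = sizeof(chunk_dalloc_t *);
    mallctl("arena.0.chunk.dalloc", &dalloc_hook, &hsz, NULL, 0);

    /*
     * Write them back unchanged; a real application would substitute its
     * own functions matching the chunk_alloc_t/chunk_dalloc_t typedefs,
     * and should install both hooks together.
     */
    mallctl("arena.0.chunk.alloc", NULL, NULL, &alloc_hook,
        sizeof(chunk_alloc_t *));
    mallctl("arena.0.chunk.dalloc", NULL, NULL, &dalloc_hook,
        sizeof(chunk_dalloc_t *));

    /* Huge allocation statistics are now per arena, not global. */
    ssz = sizeof(size_t);
    mallctl("stats.arenas.0.huge.allocated", &huge_allocated, &ssz, NULL, 0);
    printf("arena 0 huge bytes: %zu\n", huge_allocated);
    return (0);
}

The per-arena statistics read here are merged into the totals exactly as the ctl.c hunks below show, so stats.allocated continues to include huge allocations even though stats.huge.* is gone.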
INSTALL (6 changed lines)
@@ -132,12 +132,6 @@ any of the following arguments (not a definitive list) to 'configure':
 released in bulk, thus reducing the total number of mutex operations. See
 the "opt.tcache" option for usage details.
 
---enable-mremap
-Enable huge realloc() via mremap(2). mremap() is disabled by default
-because the flavor used is specific to Linux, which has a quirk in its
-virtual memory allocation algorithm that causes semi-permanent VM map holes
-under normal jemalloc operation.
-
 --disable-munmap
 Disable virtual memory deallocation via munmap(2); instead keep track of
 the virtual memory for later use. munmap() is disabled by default (i.e.
@@ -137,7 +137,6 @@ TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
 $(srcroot)test/integration/allocated.c \
 $(srcroot)test/integration/mallocx.c \
 $(srcroot)test/integration/MALLOCX_ARENA.c \
-$(srcroot)test/integration/mremap.c \
 $(srcroot)test/integration/posix_memalign.c \
 $(srcroot)test/integration/rallocx.c \
 $(srcroot)test/integration/thread_arena.c \
configure.ac (28 changed lines)
@@ -793,33 +793,6 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 AC_SUBST([enable_tcache])
 
-dnl Disable mremap() for huge realloc() by default.
-AC_ARG_ENABLE([mremap],
-  [AS_HELP_STRING([--enable-mremap], [Enable mremap(2) for huge realloc()])],
-[if test "x$enable_mremap" = "xno" ; then
-  enable_mremap="0"
-else
-  enable_mremap="1"
-fi
-],
-[enable_mremap="0"]
-)
-if test "x$enable_mremap" = "x1" ; then
-  JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [
-#define _GNU_SOURCE
-#include <sys/mman.h>
-], [
-void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
-], [je_cv_mremap_fixed])
-  if test "x${je_cv_mremap_fixed}" = "xno" ; then
-    enable_mremap="0"
-  fi
-fi
-if test "x$enable_mremap" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MREMAP], [ ])
-fi
-AC_SUBST([enable_mremap])
-
 dnl Enable VM deallocation via munmap() by default.
 AC_ARG_ENABLE([munmap],
 [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
@@ -1447,7 +1420,6 @@ AC_MSG_RESULT([fill : ${enable_fill}])
 AC_MSG_RESULT([utrace : ${enable_utrace}])
 AC_MSG_RESULT([valgrind : ${enable_valgrind}])
 AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
-AC_MSG_RESULT([mremap : ${enable_mremap}])
 AC_MSG_RESULT([munmap : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
 AC_MSG_RESULT([tls : ${enable_tls}])
@@ -486,10 +486,11 @@ for (i = 0; i < nbins; i++) {
 <para>User objects are broken into three categories according to size:
 small, large, and huge. Small objects are smaller than one page. Large
 objects are smaller than the chunk size. Huge objects are a multiple of
-the chunk size. Small and large objects are managed by arenas; huge
-objects are managed separately in a single data structure that is shared by
-all threads. Huge objects are used by applications infrequently enough
-that this single data structure is not a scalability issue.</para>
+the chunk size. Small and large objects are managed entirely by arenas;
+huge objects are additionally aggregated in a single data structure that is
+shared by all threads. Huge objects are typically used by applications
+infrequently enough that this single data structure is not a scalability
+issue.</para>
 
 <para>Each chunk that is managed by an arena tracks its contents as runs of
 contiguous pages (unused, backing a set of small objects, or backing one
@@ -647,16 +648,6 @@ for (i = 0; i < nbins; i++) {
 during build configuration.</para></listitem>
 </varlistentry>
 
-<varlistentry id="config.mremap">
-<term>
-<mallctl>config.mremap</mallctl>
-(<type>bool</type>)
-<literal>r-</literal>
-</term>
-<listitem><para><option>--enable-mremap</option> was specified during
-build configuration.</para></listitem>
-</varlistentry>
-
 <varlistentry id="config.munmap">
 <term>
 <mallctl>config.munmap</mallctl>
@@ -1273,14 +1264,9 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 <listitem><para>Set the precedence of dss allocation as related to mmap
 allocation for arena <i>, or for all arenas if <i> equals
 <link
-linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. Note
-that even during huge allocation this setting is read from the arena
-that would be chosen for small or large allocation so that applications
-can depend on consistent dss versus mmap allocation regardless of
-allocation size. See <link
-linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
-settings.
-</para></listitem>
+linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>. See
+<link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+settings.</para></listitem>
 </varlistentry>
 
 <varlistentry id="arena.i.chunk.alloc">
@@ -1291,8 +1277,8 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 </term>
 <listitem><para>Get or set the chunk allocation function for arena
 <i>. If setting, the chunk deallocation function should
-also be set via <link linkend="arena.i.chunk.dealloc">
-<mallctl>arena.<i>.chunk.dealloc</mallctl></link> to a companion
+also be set via <link linkend="arena.i.chunk.dalloc">
+<mallctl>arena.<i>.chunk.dalloc</mallctl></link> to a companion
 function that knows how to deallocate the chunks.
 <funcprototype>
 <funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef>
@@ -1313,13 +1299,18 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 size. The <parameter>alignment</parameter> parameter is always a power
 of two at least as large as the chunk size. Zeroing is mandatory if
 <parameter>*zero</parameter> is true upon function
-entry.</para></listitem>
+entry.</para>
+
+<para>Note that replacing the default chunk allocation function makes
+the arena's <link
+linkend="arena.i.dss"><mallctl>arena.<i>.dss</mallctl></link>
+setting irrelevant.</para></listitem>
 </varlistentry>
 
-<varlistentry id="arena.i.chunk.dealloc">
+<varlistentry id="arena.i.chunk.dalloc">
 <term>
-<mallctl>arena.<i>.chunk.dealloc</mallctl>
-(<type>chunk_dealloc_t *</type>)
+<mallctl>arena.<i>.chunk.dalloc</mallctl>
+(<type>chunk_dalloc_t *</type>)
 <literal>rw</literal>
 </term>
 <listitem><para>Get or set the chunk deallocation function for arena
@@ -1335,13 +1326,13 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 created chunks prior to the application having an opportunity to take
 over chunk allocation.
 <funcprototype>
-<funcdef>typedef void <function>(chunk_dealloc_t)</function></funcdef>
+<funcdef>typedef void <function>(chunk_dalloc_t)</function></funcdef>
 <paramdef>void *<parameter>chunk</parameter></paramdef>
 <paramdef>size_t <parameter>size</parameter></paramdef>
 <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
 </funcprototype>
 A chunk deallocation function conforms to the
-<type>chunk_dealloc_t</type> type and deallocates a
+<type>chunk_dalloc_t</type> type and deallocates a
 <parameter>chunk</parameter> of given <parameter>size</parameter> on
 behalf of arena <parameter>arena_ind</parameter>.</para></listitem>
 </varlistentry>
@@ -1608,39 +1599,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="stats.huge.allocated">
-<term>
-<mallctl>stats.huge.allocated</mallctl>
-(<type>size_t</type>)
-<literal>r-</literal>
-[<option>--enable-stats</option>]
-</term>
-<listitem><para>Number of bytes currently allocated by huge objects.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="stats.huge.nmalloc">
-<term>
-<mallctl>stats.huge.nmalloc</mallctl>
-(<type>uint64_t</type>)
-<literal>r-</literal>
-[<option>--enable-stats</option>]
-</term>
-<listitem><para>Cumulative number of huge allocation requests.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="stats.huge.ndalloc">
-<term>
-<mallctl>stats.huge.ndalloc</mallctl>
-(<type>uint64_t</type>)
-<literal>r-</literal>
-[<option>--enable-stats</option>]
-</term>
-<listitem><para>Cumulative number of huge deallocation requests.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="stats.arenas.i.dss">
 <term>
 <mallctl>stats.arenas.<i>.dss</mallctl>
@@ -1817,6 +1775,50 @@ malloc_conf = "xmalloc:true";]]></programlisting>
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="stats.arenas.i.huge.allocated">
+<term>
+<mallctl>stats.arenas.<i>.huge.allocated</mallctl>
+(<type>size_t</type>)
+<literal>r-</literal>
+[<option>--enable-stats</option>]
+</term>
+<listitem><para>Number of bytes currently allocated by huge objects.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="stats.arenas.i.huge.nmalloc">
+<term>
+<mallctl>stats.arenas.<i>.huge.nmalloc</mallctl>
+(<type>uint64_t</type>)
+<literal>r-</literal>
+[<option>--enable-stats</option>]
+</term>
+<listitem><para>Cumulative number of huge allocation requests served
+directly by the arena.</para></listitem>
+</varlistentry>
+
+<varlistentry id="stats.arenas.i.huge.ndalloc">
+<term>
+<mallctl>stats.arenas.<i>.huge.ndalloc</mallctl>
+(<type>uint64_t</type>)
+<literal>r-</literal>
+[<option>--enable-stats</option>]
+</term>
+<listitem><para>Cumulative number of huge deallocation requests served
+directly by the arena.</para></listitem>
+</varlistentry>
+
+<varlistentry id="stats.arenas.i.huge.nrequests">
+<term>
+<mallctl>stats.arenas.<i>.huge.nrequests</mallctl>
+(<type>uint64_t</type>)
+<literal>r-</literal>
+[<option>--enable-stats</option>]
+</term>
+<listitem><para>Cumulative number of huge allocation requests.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="stats.arenas.i.bins.j.allocated">
 <term>
 <mallctl>stats.arenas.<i>.bins.<j>.allocated</mallctl>
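The manual page hunks above define the chunk_alloc_t and chunk_dalloc_t prototypes that user-supplied hooks must follow. Below is a hedged sketch of one conforming pair, not taken from the commit: the names example_chunk_alloc/example_chunk_dalloc are hypothetical, the mmap()-based allocator ignores the arena index and any dss preference, and the unconditional false return from the deallocation hook simply mirrors chunk_dalloc_default() in src/chunk.c. Such a pair would be installed together through the arena.<i>.chunk.alloc and arena.<i>.chunk.dalloc mallctls shown earlier.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Hypothetical chunk_alloc_t implementation: mmap() more than requested,
 * then trim so the returned address satisfies the requested alignment
 * (alignment is documented to be a power of two at least as large as the
 * chunk size).
 */
static void *
example_chunk_alloc(size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{
    size_t alloc_size = size + alignment;
    void *map;
    char *addr;
    size_t lead;

    (void)arena_ind;
    if (alloc_size < size)
        return (NULL);          /* size + alignment overflowed. */
    map = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE,
        MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
        return (NULL);
    addr = (char *)map;
    lead = (alignment - ((uintptr_t)addr & (alignment - 1))) &
        (alignment - 1);
    if (lead != 0)
        munmap(addr, lead);                             /* Leading excess. */
    if (alignment - lead != 0)
        munmap(addr + lead + size, alignment - lead);   /* Trailing excess. */
    *zero = true;   /* Fresh anonymous mappings are zero-filled. */
    return (addr + lead);
}

/*
 * Hypothetical chunk_dalloc_t implementation; returning false here mirrors
 * chunk_dalloc_default() rather than asserting any richer convention.
 */
static bool
example_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
    (void)arena_ind;
    munmap(chunk, size);
    return (false);
}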
@@ -345,7 +345,7 @@ struct arena_s {
  */
 arena_chunk_t *spare;
 
-/* Number of pages in active runs. */
+/* Number of pages in active runs and huge regions. */
 size_t nactive;
 
 /*
@@ -374,7 +374,7 @@ struct arena_s {
  * user-configureable chunk allocation and deallocation functions.
  */
 chunk_alloc_t *chunk_alloc;
-chunk_dealloc_t *chunk_dealloc;
+chunk_dalloc_t *chunk_dalloc;
 
 /* bins is used to store trees of free regions. */
 arena_bin_t bins[NBINS];
@@ -403,6 +403,9 @@ extern arena_bin_info_t arena_bin_info[NBINS];
 /* Number of large size classes. */
 #define nlclasses (chunk_npages - map_bias)
 
+void *arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
+    bool *zero);
+void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size);
 void arena_purge_all(arena_t *arena);
 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
     size_t binind, uint64_t prof_accumbytes);
@@ -12,7 +12,7 @@
 void *base_alloc(size_t size);
 void *base_calloc(size_t number, size_t size);
 extent_node_t *base_node_alloc(void);
-void base_node_dealloc(extent_node_t *node);
+void base_node_dalloc(extent_node_t *node);
 bool base_boot(void);
 void base_prefork(void);
 void base_postfork_parent(void);
@@ -43,12 +43,14 @@ extern size_t chunk_npages;
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
-void *chunk_alloc(arena_t *arena, size_t size, size_t alignment, bool base,
-    bool *zero, dss_prec_t dss_prec);
+void *chunk_alloc_base(size_t size);
+void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
+    chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, size_t size,
+    size_t alignment, bool *zero);
 void *chunk_alloc_default(size_t size, size_t alignment, bool *zero,
     unsigned arena_ind);
 void chunk_unmap(void *chunk, size_t size);
-void chunk_dealloc(arena_t *arena, void *chunk, size_t size, bool unmap);
+bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
 bool chunk_boot(void);
 void chunk_prefork(void);
 void chunk_postfork_parent(void);
|
@ -12,7 +12,7 @@
|
|||||||
bool pages_purge(void *addr, size_t length);
|
bool pages_purge(void *addr, size_t length);
|
||||||
|
|
||||||
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
|
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
|
||||||
bool chunk_dealloc_mmap(void *chunk, size_t size);
|
bool chunk_dalloc_mmap(void *chunk, size_t size);
|
||||||
|
|
||||||
#endif /* JEMALLOC_H_EXTERNS */
|
#endif /* JEMALLOC_H_EXTERNS */
|
||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
|
@@ -57,11 +57,6 @@ struct ctl_stats_s {
     uint64_t total; /* stats_chunks.nchunks */
     size_t high; /* stats_chunks.highchunks */
     } chunks;
-    struct {
-        size_t allocated; /* huge_allocated */
-        uint64_t nmalloc; /* huge_nmalloc */
-        uint64_t ndalloc; /* huge_ndalloc */
-    } huge;
     unsigned narenas;
     ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
 };
@@ -9,30 +9,18 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-/* Huge allocation statistics. */
-extern uint64_t huge_nmalloc;
-extern uint64_t huge_ndalloc;
-extern size_t huge_allocated;
-
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t huge_mtx;
-
-void *huge_malloc(arena_t *arena, size_t size, bool zero,
-    dss_prec_t dss_prec);
-void *huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
-    dss_prec_t dss_prec);
+void *huge_malloc(arena_t *arena, size_t size, bool zero);
+void *huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
 bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra);
 void *huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc,
-    dss_prec_t dss_prec);
+    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(void *ptr, bool unmap);
+void huge_dalloc(void *ptr);
 size_t huge_salloc(const void *ptr);
-dss_prec_t huge_dss_prec_get(arena_t *arena);
 prof_ctx_t *huge_prof_ctx_get(const void *ptr);
 void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
 bool huge_boot(void);
@@ -122,13 +122,6 @@ static const bool config_prof_libunwind =
     false
 #endif
     ;
-static const bool config_mremap =
-#ifdef JEMALLOC_MREMAP
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true
@@ -702,8 +695,7 @@ imalloct(size_t size, bool try_tcache, arena_t *arena)
     if (size <= arena_maxclass)
         return (arena_malloc(arena, size, false, try_tcache));
     else
-        return (huge_malloc(arena, size, false,
-            huge_dss_prec_get(arena)));
+        return (huge_malloc(arena, size, false));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -720,8 +712,7 @@ icalloct(size_t size, bool try_tcache, arena_t *arena)
     if (size <= arena_maxclass)
         return (arena_malloc(arena, size, true, try_tcache));
     else
-        return (huge_malloc(arena, size, true,
-            huge_dss_prec_get(arena)));
+        return (huge_malloc(arena, size, true));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -747,11 +738,9 @@ ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
         ret = arena_palloc(choose_arena(arena), usize,
             alignment, zero);
     } else if (alignment <= chunksize)
-        ret = huge_malloc(arena, usize, zero,
-            huge_dss_prec_get(arena));
+        ret = huge_malloc(arena, usize, zero);
     else
-        ret = huge_palloc(arena, usize, alignment, zero,
-            huge_dss_prec_get(arena));
+        ret = huge_palloc(arena, usize, alignment, zero);
     }
 
     assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -833,7 +822,7 @@ idalloct(void *ptr, bool try_tcache)
     if (chunk != ptr)
         arena_dalloc(chunk, ptr, try_tcache);
     else
-        huge_dalloc(ptr, true);
+        huge_dalloc(ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -920,7 +909,7 @@ iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
         try_tcache_dalloc));
     } else {
         return (huge_ralloc(arena, ptr, oldsize, size, extra,
-            alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
+            alignment, zero, try_tcache_dalloc));
     }
 }
 
@@ -144,13 +144,6 @@
  */
 #undef JEMALLOC_MUNMAP
 
-/*
- * If defined, use mremap(...MREMAP_FIXED...) for huge realloc().  This is
- * disabled by default because it is Linux-specific and it will cause virtual
- * memory map holes, much like munmap(2) does.
- */
-#undef JEMALLOC_MREMAP
-
 /* TLS is used to map arenas and magazine caches to threads. */
 #undef JEMALLOC_TLS
 
@@ -5,6 +5,8 @@ arena_alloc_junk_small
 arena_bin_index
 arena_bin_info
 arena_boot
+arena_chunk_alloc_huge
+arena_chunk_dalloc_huge
 arena_dalloc
 arena_dalloc_bin
 arena_dalloc_bin_locked
@@ -86,7 +88,7 @@ base_alloc
 base_boot
 base_calloc
 base_node_alloc
-base_node_dealloc
+base_node_dalloc
 base_postfork_child
 base_postfork_parent
 base_prefork
@@ -103,13 +105,14 @@ bt_init
 buferror
 choose_arena
 choose_arena_hard
-chunk_alloc
+chunk_alloc_arena
+chunk_alloc_base
 chunk_alloc_default
 chunk_alloc_dss
 chunk_alloc_mmap
 chunk_boot
-chunk_dealloc
-chunk_dealloc_mmap
+chunk_dalloc_default
+chunk_dalloc_mmap
 chunk_dss_boot
 chunk_dss_postfork_child
 chunk_dss_postfork_parent
@@ -198,9 +201,7 @@ huge_allocated
 huge_boot
 huge_dalloc
 huge_dalloc_junk
-huge_dss_prec_get
 huge_malloc
-huge_mtx
 huge_ndalloc
 huge_nmalloc
 huge_palloc
@@ -101,6 +101,11 @@ struct arena_stats_s {
     uint64_t ndalloc_large;
     uint64_t nrequests_large;
 
+    size_t allocated_huge;
+    uint64_t nmalloc_huge;
+    uint64_t ndalloc_huge;
+    uint64_t nrequests_huge;
+
     /*
      * One element for each possible size class, including sizes that
      * overlap with bin size classes. This is necessary because ipalloc()
@@ -46,4 +46,4 @@ JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
 #endif
 
 typedef void *(chunk_alloc_t)(size_t, size_t, bool *, unsigned);
-typedef bool (chunk_dealloc_t)(void *, size_t, unsigned);
+typedef bool (chunk_dalloc_t)(void *, size_t, unsigned);
src/arena.c (113 changed lines)
@@ -559,6 +559,65 @@ arena_chunk_init_spare(arena_t *arena)
     return (chunk);
 }
 
+static arena_chunk_t *
+arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
+    bool *zero)
+{
+    arena_chunk_t *chunk;
+    chunk_alloc_t *chunk_alloc;
+    chunk_dalloc_t *chunk_dalloc;
+
+    chunk_alloc = arena->chunk_alloc;
+    chunk_dalloc = arena->chunk_dalloc;
+    malloc_mutex_unlock(&arena->lock);
+    chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
+        arena->ind, size, alignment, zero);
+    malloc_mutex_lock(&arena->lock);
+    if (config_stats && chunk != NULL)
+        arena->stats.mapped += chunksize;
+
+    return (chunk);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
+    bool *zero)
+{
+    void *ret;
+    chunk_alloc_t *chunk_alloc;
+    chunk_dalloc_t *chunk_dalloc;
+
+    malloc_mutex_lock(&arena->lock);
+    chunk_alloc = arena->chunk_alloc;
+    chunk_dalloc = arena->chunk_dalloc;
+    if (config_stats) {
+        /* Optimistically update stats prior to unlocking. */
+        arena->stats.mapped += size;
+        arena->stats.allocated_huge += size;
+        arena->stats.nmalloc_huge++;
+        arena->stats.nrequests_huge++;
+    }
+    arena->nactive += (size >> LG_PAGE);
+    malloc_mutex_unlock(&arena->lock);
+
+    ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
+        size, alignment, zero);
+    if (config_stats) {
+        if (ret != NULL)
+            stats_cactive_add(size);
+        else {
+            /* Revert optimistic stats updates. */
+            malloc_mutex_lock(&arena->lock);
+            arena->stats.mapped -= size;
+            arena->stats.allocated_huge -= size;
+            arena->stats.nmalloc_huge--;
+            malloc_mutex_unlock(&arena->lock);
+        }
+    }
+
+    return (ret);
+}
+
 static arena_chunk_t *
 arena_chunk_init_hard(arena_t *arena)
 {
@@ -569,14 +628,9 @@ arena_chunk_init_hard(arena_t *arena)
     assert(arena->spare == NULL);
 
     zero = false;
-    malloc_mutex_unlock(&arena->lock);
-    chunk = (arena_chunk_t *)chunk_alloc(arena, chunksize, chunksize,
-        false, &zero, arena->dss_prec);
-    malloc_mutex_lock(&arena->lock);
+    chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
     if (chunk == NULL)
         return (NULL);
-    if (config_stats)
-        arena->stats.mapped += chunksize;
 
     chunk->arena = arena;
 
@@ -645,7 +699,38 @@ arena_chunk_alloc(arena_t *arena)
 }
 
 static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
+{
+    chunk_dalloc_t *chunk_dalloc;
+
+    chunk_dalloc = arena->chunk_dalloc;
+    malloc_mutex_unlock(&arena->lock);
+    chunk_dalloc((void *)chunk, chunksize, arena->ind);
+    malloc_mutex_lock(&arena->lock);
+    if (config_stats)
+        arena->stats.mapped -= chunksize;
+}
+
+void
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
+{
+    chunk_dalloc_t *chunk_dalloc;
+
+    malloc_mutex_lock(&arena->lock);
+    chunk_dalloc = arena->chunk_dalloc;
+    if (config_stats) {
+        arena->stats.mapped -= size;
+        arena->stats.allocated_huge -= size;
+        arena->stats.ndalloc_huge++;
+        stats_cactive_sub(size);
+    }
+    arena->nactive -= (size >> LG_PAGE);
+    malloc_mutex_unlock(&arena->lock);
+    chunk_dalloc(chunk, size, arena->ind);
+}
+
+static void
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 {
     assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
     assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -667,11 +752,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
     arena_chunk_t *spare = arena->spare;
 
     arena->spare = chunk;
-    malloc_mutex_unlock(&arena->lock);
-    chunk_dealloc(arena, (void *)spare, chunksize, true);
-    malloc_mutex_lock(&arena->lock);
-    if (config_stats)
-        arena->stats.mapped -= chunksize;
+    arena_chunk_dalloc_internal(arena, spare);
     } else
         arena->spare = chunk;
 }
@@ -1231,7 +1312,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
     if (size == arena_maxclass) {
         assert(run_ind == map_bias);
         assert(run_pages == (arena_maxclass >> LG_PAGE));
-        arena_chunk_dealloc(arena, chunk);
+        arena_chunk_dalloc(arena, chunk);
     }
 
     /*
@@ -2283,6 +2364,10 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
     astats->nmalloc_large += arena->stats.nmalloc_large;
     astats->ndalloc_large += arena->stats.ndalloc_large;
     astats->nrequests_large += arena->stats.nrequests_large;
+    astats->allocated_huge += arena->stats.allocated_huge;
+    astats->nmalloc_huge += arena->stats.nmalloc_huge;
+    astats->ndalloc_huge += arena->stats.ndalloc_huge;
+    astats->nrequests_huge += arena->stats.nrequests_huge;
 
     for (i = 0; i < nlclasses; i++) {
         lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@@ -2320,7 +2405,7 @@ arena_new(arena_t *arena, unsigned ind)
     arena->ind = ind;
     arena->nthreads = 0;
     arena->chunk_alloc = chunk_alloc_default;
-    arena->chunk_dealloc = (chunk_dealloc_t *)chunk_unmap;
+    arena->chunk_dalloc = chunk_dalloc_default;
 
     if (malloc_mutex_init(&arena->lock))
         return (true);
src/base.c (12 changed lines)
@@ -16,24 +16,16 @@ static void *base_next_addr;
 static void *base_past_addr; /* Addr immediately past base_pages. */
 static extent_node_t *base_nodes;
 
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool base_pages_alloc(size_t minsize);
-
 /******************************************************************************/
 
 static bool
 base_pages_alloc(size_t minsize)
 {
     size_t csize;
-    bool zero;
 
     assert(minsize != 0);
     csize = CHUNK_CEILING(minsize);
-    zero = false;
-    base_pages = chunk_alloc(NULL, csize, chunksize, true, &zero,
-        chunk_dss_prec_get());
+    base_pages = chunk_alloc_base(csize);
     if (base_pages == NULL)
         return (true);
     base_next_addr = base_pages;
@@ -100,7 +92,7 @@ base_node_alloc(void)
 }
 
 void
-base_node_dealloc(extent_node_t *node)
+base_node_dalloc(extent_node_t *node)
 {
 
     JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
src/chunk.c (153 changed lines)
@@ -31,13 +31,12 @@ size_t map_bias;
 size_t arena_maxclass; /* Max size class for arenas. */
 
 /******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
 
-static void *chunk_recycle(extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
-    bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_dalloc_core(void *chunk, size_t size);
 
 /******************************************************************************/
 
@@ -104,7 +103,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
     malloc_mutex_unlock(&chunks_mtx);
     node = base_node_alloc();
     if (node == NULL) {
-        chunk_dealloc(NULL, ret, size, true);
+        chunk_dalloc_core(ret, size);
         return (NULL);
     }
     malloc_mutex_lock(&chunks_mtx);
@@ -119,7 +118,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
     malloc_mutex_unlock(&chunks_mtx);
 
     if (node != NULL)
-        base_node_dealloc(node);
+        base_node_dalloc(node);
     if (*zero) {
         if (zeroed == false)
             memset(ret, 0, size);
@@ -179,9 +178,73 @@ chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
     return (NULL);
 }
 
-/*
- * Default arena chunk allocation routine in the absence of user-override.
- */
+static bool
+chunk_register(void *chunk, size_t size, bool base)
+{
+
+    assert(chunk != NULL);
+    assert(CHUNK_ADDR2BASE(chunk) == chunk);
+
+    if (config_ivsalloc && base == false) {
+        if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
+            return (true);
+    }
+    if (config_stats || config_prof) {
+        bool gdump;
+        malloc_mutex_lock(&chunks_mtx);
+        if (config_stats)
+            stats_chunks.nchunks += (size / chunksize);
+        stats_chunks.curchunks += (size / chunksize);
+        if (stats_chunks.curchunks > stats_chunks.highchunks) {
+            stats_chunks.highchunks =
+                stats_chunks.curchunks;
+            if (config_prof)
+                gdump = true;
+        } else if (config_prof)
+            gdump = false;
+        malloc_mutex_unlock(&chunks_mtx);
+        if (config_prof && opt_prof && opt_prof_gdump && gdump)
+            prof_gdump();
+    }
+    if (config_valgrind)
+        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
+    return (false);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+    void *ret;
+    bool zero;
+
+    zero = false;
+    ret = chunk_alloc_core(size, chunksize, true, &zero,
+        chunk_dss_prec_get());
+    if (ret == NULL)
+        return (NULL);
+    if (chunk_register(ret, size, true)) {
+        chunk_dalloc_core(ret, size);
+        return (NULL);
+    }
+    return (ret);
+}
+
+void *
+chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
+    unsigned arena_ind, size_t size, size_t alignment, bool *zero)
+{
+    void *ret;
+
+    ret = chunk_alloc(size, alignment, zero, arena_ind);
+    if (ret != NULL && chunk_register(ret, size, false)) {
+        chunk_dalloc(ret, size, arena_ind);
+        ret = NULL;
+    }
+
+    return (ret);
+}
+
+/* Default arena chunk allocation routine in the absence of user override. */
 void *
 chunk_alloc_default(size_t size, size_t alignment, bool *zero,
     unsigned arena_ind)
@@ -191,48 +254,6 @@ chunk_alloc_default(size_t size, size_t alignment, bool *zero,
     arenas[arena_ind]->dss_prec));
 }
 
-void *
-chunk_alloc(arena_t *arena, size_t size, size_t alignment, bool base,
-    bool *zero, dss_prec_t dss_prec)
-{
-    void *ret;
-
-    if (arena)
-        ret = arena->chunk_alloc(size, alignment, zero, arena->ind);
-    else
-        ret = chunk_alloc_core(size, alignment, base, zero, dss_prec);
-
-    if (ret != NULL) {
-        if (config_ivsalloc && base == false) {
-            if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
-                chunk_dealloc(arena, ret, size, true);
-                return (NULL);
-            }
-        }
-        if (config_stats || config_prof) {
-            bool gdump;
-            malloc_mutex_lock(&chunks_mtx);
-            if (config_stats)
-                stats_chunks.nchunks += (size / chunksize);
-            stats_chunks.curchunks += (size / chunksize);
-            if (stats_chunks.curchunks > stats_chunks.highchunks) {
-                stats_chunks.highchunks =
-                    stats_chunks.curchunks;
-                if (config_prof)
-                    gdump = true;
-            } else if (config_prof)
-                gdump = false;
-            malloc_mutex_unlock(&chunks_mtx);
-            if (config_prof && opt_prof && opt_prof_gdump && gdump)
-                prof_gdump();
-        }
-        if (config_valgrind)
-            JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-    }
-    assert(CHUNK_ADDR2BASE(ret) == ret);
-    return (ret);
-}
-
 static void
 chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
@@ -316,9 +337,9 @@ label_return:
      * avoid potential deadlock.
      */
     if (xnode != NULL)
-        base_node_dealloc(xnode);
+        base_node_dalloc(xnode);
     if (xprev != NULL)
-        base_node_dealloc(xprev);
+        base_node_dalloc(xprev);
 }
 
 void
@@ -331,12 +352,12 @@ chunk_unmap(void *chunk, size_t size)
 
     if (have_dss && chunk_in_dss(chunk))
         chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
-    else if (chunk_dealloc_mmap(chunk, size))
+    else if (chunk_dalloc_mmap(chunk, size))
         chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
 }
 
-void
-chunk_dealloc(arena_t *arena, void *chunk, size_t size, bool unmap)
+static void
+chunk_dalloc_core(void *chunk, size_t size)
 {
 
     assert(chunk != NULL);
@@ -353,12 +374,16 @@ chunk_dealloc(arena_t *arena, void *chunk, size_t size, bool unmap)
     malloc_mutex_unlock(&chunks_mtx);
     }
 
-    if (unmap) {
-        if (arena)
-            arena->chunk_dealloc(chunk, size, arena->ind);
-        else
-            chunk_unmap(chunk, size);
-    }
+    chunk_unmap(chunk, size);
+}
+
+/* Default arena chunk deallocation routine in the absence of user override. */
+bool
+chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+    chunk_dalloc_core(chunk, size);
+    return (false);
 }
 
 bool
|
@ -200,7 +200,7 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
|
|||||||
}
|
}
|
||||||
|
|
||||||
bool
|
bool
|
||||||
chunk_dealloc_mmap(void *chunk, size_t size)
|
chunk_dalloc_mmap(void *chunk, size_t size)
|
||||||
{
|
{
|
||||||
|
|
||||||
if (config_munmap)
|
if (config_munmap)
|
||||||
|
68
src/ctl.c
68
src/ctl.c
@ -76,7 +76,6 @@ CTL_PROTO(thread_deallocatedp)
|
|||||||
CTL_PROTO(config_debug)
|
CTL_PROTO(config_debug)
|
||||||
CTL_PROTO(config_fill)
|
CTL_PROTO(config_fill)
|
||||||
CTL_PROTO(config_lazy_lock)
|
CTL_PROTO(config_lazy_lock)
|
||||||
CTL_PROTO(config_mremap)
|
|
||||||
CTL_PROTO(config_munmap)
|
CTL_PROTO(config_munmap)
|
||||||
CTL_PROTO(config_prof)
|
CTL_PROTO(config_prof)
|
||||||
CTL_PROTO(config_prof_libgcc)
|
CTL_PROTO(config_prof_libgcc)
|
||||||
@ -114,7 +113,7 @@ CTL_PROTO(arena_i_purge)
|
|||||||
static void arena_purge(unsigned arena_ind);
|
static void arena_purge(unsigned arena_ind);
|
||||||
CTL_PROTO(arena_i_dss)
|
CTL_PROTO(arena_i_dss)
|
||||||
CTL_PROTO(arena_i_chunk_alloc)
|
CTL_PROTO(arena_i_chunk_alloc)
|
||||||
CTL_PROTO(arena_i_chunk_dealloc)
|
CTL_PROTO(arena_i_chunk_dalloc)
|
||||||
INDEX_PROTO(arena_i)
|
INDEX_PROTO(arena_i)
|
||||||
CTL_PROTO(arenas_bin_i_size)
|
CTL_PROTO(arenas_bin_i_size)
|
||||||
CTL_PROTO(arenas_bin_i_nregs)
|
CTL_PROTO(arenas_bin_i_nregs)
|
||||||
@ -137,9 +136,6 @@ CTL_PROTO(prof_interval)
|
|||||||
CTL_PROTO(stats_chunks_current)
|
CTL_PROTO(stats_chunks_current)
|
||||||
CTL_PROTO(stats_chunks_total)
|
CTL_PROTO(stats_chunks_total)
|
||||||
CTL_PROTO(stats_chunks_high)
|
CTL_PROTO(stats_chunks_high)
|
||||||
CTL_PROTO(stats_huge_allocated)
|
|
||||||
CTL_PROTO(stats_huge_nmalloc)
|
|
||||||
CTL_PROTO(stats_huge_ndalloc)
|
|
||||||
CTL_PROTO(stats_arenas_i_small_allocated)
|
CTL_PROTO(stats_arenas_i_small_allocated)
|
||||||
CTL_PROTO(stats_arenas_i_small_nmalloc)
|
CTL_PROTO(stats_arenas_i_small_nmalloc)
|
||||||
CTL_PROTO(stats_arenas_i_small_ndalloc)
|
CTL_PROTO(stats_arenas_i_small_ndalloc)
|
||||||
@ -148,6 +144,10 @@ CTL_PROTO(stats_arenas_i_large_allocated)
|
|||||||
CTL_PROTO(stats_arenas_i_large_nmalloc)
|
CTL_PROTO(stats_arenas_i_large_nmalloc)
|
||||||
CTL_PROTO(stats_arenas_i_large_ndalloc)
|
CTL_PROTO(stats_arenas_i_large_ndalloc)
|
||||||
CTL_PROTO(stats_arenas_i_large_nrequests)
|
CTL_PROTO(stats_arenas_i_large_nrequests)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_allocated)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_nmalloc)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_ndalloc)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_nrequests)
|
||||||
CTL_PROTO(stats_arenas_i_bins_j_allocated)
|
CTL_PROTO(stats_arenas_i_bins_j_allocated)
|
||||||
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
|
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
|
||||||
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
|
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
|
||||||
@ -214,7 +214,6 @@ static const ctl_named_node_t config_node[] = {
|
|||||||
{NAME("debug"), CTL(config_debug)},
|
{NAME("debug"), CTL(config_debug)},
|
||||||
{NAME("fill"), CTL(config_fill)},
|
{NAME("fill"), CTL(config_fill)},
|
||||||
{NAME("lazy_lock"), CTL(config_lazy_lock)},
|
{NAME("lazy_lock"), CTL(config_lazy_lock)},
|
||||||
{NAME("mremap"), CTL(config_mremap)},
|
|
||||||
{NAME("munmap"), CTL(config_munmap)},
|
{NAME("munmap"), CTL(config_munmap)},
|
||||||
{NAME("prof"), CTL(config_prof)},
|
{NAME("prof"), CTL(config_prof)},
|
||||||
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
|
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
|
||||||
@ -255,7 +254,7 @@ static const ctl_named_node_t opt_node[] = {
|
|||||||
|
|
||||||
static const ctl_named_node_t chunk_node[] = {
|
static const ctl_named_node_t chunk_node[] = {
|
||||||
{NAME("alloc"), CTL(arena_i_chunk_alloc)},
|
{NAME("alloc"), CTL(arena_i_chunk_alloc)},
|
||||||
{NAME("dealloc"), CTL(arena_i_chunk_dealloc)}
|
{NAME("dalloc"), CTL(arena_i_chunk_dalloc)}
|
||||||
};
|
};
|
||||||
|
|
||||||
static const ctl_named_node_t arena_i_node[] = {
|
static const ctl_named_node_t arena_i_node[] = {
|
||||||
@ -321,12 +320,6 @@ static const ctl_named_node_t stats_chunks_node[] = {
|
|||||||
{NAME("high"), CTL(stats_chunks_high)}
|
{NAME("high"), CTL(stats_chunks_high)}
|
||||||
};
|
};
|
||||||
|
|
||||||
static const ctl_named_node_t stats_huge_node[] = {
|
|
||||||
{NAME("allocated"), CTL(stats_huge_allocated)},
|
|
||||||
{NAME("nmalloc"), CTL(stats_huge_nmalloc)},
|
|
||||||
{NAME("ndalloc"), CTL(stats_huge_ndalloc)}
|
|
||||||
};
|
|
||||||
|
|
||||||
static const ctl_named_node_t stats_arenas_i_small_node[] = {
|
static const ctl_named_node_t stats_arenas_i_small_node[] = {
|
||||||
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
|
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
|
||||||
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
|
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
|
||||||
@ -341,6 +334,13 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = {
|
|||||||
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
|
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const ctl_named_node_t stats_arenas_i_huge_node[] = {
|
||||||
|
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
|
||||||
|
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
|
||||||
|
{NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
|
||||||
|
{NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)},
|
||||||
|
};
|
||||||
|
|
||||||
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
|
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
|
||||||
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
|
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
|
||||||
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
|
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
|
||||||
@ -385,6 +385,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
|
|||||||
{NAME("purged"), CTL(stats_arenas_i_purged)},
|
{NAME("purged"), CTL(stats_arenas_i_purged)},
|
||||||
{NAME("small"), CHILD(named, stats_arenas_i_small)},
|
{NAME("small"), CHILD(named, stats_arenas_i_small)},
|
||||||
{NAME("large"), CHILD(named, stats_arenas_i_large)},
|
{NAME("large"), CHILD(named, stats_arenas_i_large)},
|
||||||
|
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
|
||||||
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
|
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
|
||||||
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
|
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
|
||||||
};
|
};
|
||||||
@ -402,7 +403,6 @@ static const ctl_named_node_t stats_node[] = {
|
|||||||
{NAME("active"), CTL(stats_active)},
|
{NAME("active"), CTL(stats_active)},
|
||||||
{NAME("mapped"), CTL(stats_mapped)},
|
{NAME("mapped"), CTL(stats_mapped)},
|
||||||
{NAME("chunks"), CHILD(named, stats_chunks)},
|
{NAME("chunks"), CHILD(named, stats_chunks)},
|
||||||
{NAME("huge"), CHILD(named, stats_huge)},
|
|
||||||
{NAME("arenas"), CHILD(indexed, stats_arenas)}
|
{NAME("arenas"), CHILD(indexed, stats_arenas)}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -500,6 +500,11 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
|
|||||||
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
|
sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
|
||||||
sstats->astats.nrequests_large += astats->astats.nrequests_large;
|
sstats->astats.nrequests_large += astats->astats.nrequests_large;
|
||||||
|
|
||||||
|
sstats->astats.allocated_huge += astats->astats.allocated_huge;
|
||||||
|
sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
|
||||||
|
sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
|
||||||
|
sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
|
||||||
|
|
||||||
for (i = 0; i < nlclasses; i++) {
|
for (i = 0; i < nlclasses; i++) {
|
||||||
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
|
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
|
||||||
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
|
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
|
||||||
@ -626,12 +631,6 @@ ctl_refresh(void)
|
|||||||
ctl_stats.chunks.total = stats_chunks.nchunks;
|
ctl_stats.chunks.total = stats_chunks.nchunks;
|
||||||
ctl_stats.chunks.high = stats_chunks.highchunks;
|
ctl_stats.chunks.high = stats_chunks.highchunks;
|
||||||
malloc_mutex_unlock(&chunks_mtx);
|
malloc_mutex_unlock(&chunks_mtx);
|
||||||
|
|
||||||
malloc_mutex_lock(&huge_mtx);
|
|
||||||
ctl_stats.huge.allocated = huge_allocated;
|
|
||||||
ctl_stats.huge.nmalloc = huge_nmalloc;
|
|
||||||
ctl_stats.huge.ndalloc = huge_ndalloc;
|
|
||||||
malloc_mutex_unlock(&huge_mtx);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -662,10 +661,9 @@ ctl_refresh(void)
|
|||||||
ctl_stats.allocated =
|
ctl_stats.allocated =
|
||||||
ctl_stats.arenas[ctl_stats.narenas].allocated_small
|
ctl_stats.arenas[ctl_stats.narenas].allocated_small
|
||||||
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
|
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
|
||||||
+ ctl_stats.huge.allocated;
|
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
|
||||||
ctl_stats.active =
|
ctl_stats.active =
|
||||||
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
|
(ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
|
||||||
+ ctl_stats.huge.allocated;
|
|
||||||
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
|
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1140,7 +1138,6 @@ label_return:
|
|||||||
CTL_RO_BOOL_CONFIG_GEN(config_debug)
|
CTL_RO_BOOL_CONFIG_GEN(config_debug)
|
||||||
CTL_RO_BOOL_CONFIG_GEN(config_fill)
|
CTL_RO_BOOL_CONFIG_GEN(config_fill)
|
||||||
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
|
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
|
||||||
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
|
|
||||||
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
|
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
|
||||||
CTL_RO_BOOL_CONFIG_GEN(config_prof)
|
CTL_RO_BOOL_CONFIG_GEN(config_prof)
|
||||||
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
|
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
|
||||||
@ -1377,8 +1374,8 @@ label_return:
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||||
void *newp, size_t newlen)
|
size_t *oldlenp, void *newp, size_t newlen)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
unsigned arena_ind = mib[1];
|
unsigned arena_ind = mib[1];
|
||||||
@ -1402,8 +1399,8 @@ label_outer_return:
|
|||||||
}
|
}
|
||||||
|
|
||||||
static int
|
static int
|
||||||
arena_i_chunk_dealloc_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
|
arena_i_chunk_dalloc_ctl(const size_t *mib, size_t miblen, void *oldp,
|
||||||
void *newp, size_t newlen)
|
size_t *oldlenp, void *newp, size_t newlen)
|
||||||
{
|
{
|
||||||
|
|
||||||
int ret;
|
int ret;
|
||||||
@ -1413,8 +1410,8 @@ arena_i_chunk_dealloc_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *
|
|||||||
malloc_mutex_lock(&ctl_mtx);
|
malloc_mutex_lock(&ctl_mtx);
|
||||||
if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
|
if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
|
||||||
malloc_mutex_lock(&arena->lock);
|
malloc_mutex_lock(&arena->lock);
|
||||||
READ(arena->chunk_dealloc, chunk_dealloc_t *);
|
READ(arena->chunk_dalloc, chunk_dalloc_t *);
|
||||||
WRITE(arena->chunk_dealloc, chunk_dealloc_t *);
|
WRITE(arena->chunk_dalloc, chunk_dalloc_t *);
|
||||||
} else {
|
} else {
|
||||||
ret = EFAULT;
|
ret = EFAULT;
|
||||||
goto label_outer_return;
|
goto label_outer_return;
|
||||||
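
As a usage sketch for the two handlers above (not part of this diff): the arena.<i>.chunk.alloc and arena.<i>.chunk.dalloc mallctls let an application read and replace a single arena's chunk hooks. The hook signature below matches the chunk_dalloc_t type exercised by the integration test later in this commit; my_chunk_dalloc, orig_dalloc, and install_chunk_dalloc_hook are hypothetical names, and the sketch assumes the chunk_alloc_t/chunk_dalloc_t typedefs are visible from the installed jemalloc header, as they are in the test harness.

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Previously installed hook, captured when the replacement is written. */
static chunk_dalloc_t *orig_dalloc;

/* Hypothetical replacement hook: return false on success, as the test's hook does. */
static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
	/* A real hook might recycle the chunk; here we simply forward. */
	return (orig_dalloc(chunk, size, arena_ind));
}

static int
install_chunk_dalloc_hook(void)
{
	chunk_dalloc_t *new_dalloc = my_chunk_dalloc;
	size_t old_len = sizeof(chunk_dalloc_t *);

	/* Read the current hook and write the replacement in one mallctl call. */
	return (mallctl("arena.0.chunk.dalloc", &orig_dalloc, &old_len,
	    &new_dalloc, sizeof(chunk_dalloc_t *)));
}
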
@@ -1611,9 +1608,6 @@ CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
     size_t)
 CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
 CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)

 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
@@ -1644,6 +1638,14 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
     ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
     ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+    ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+    ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+    ctl_stats.arenas[mib[2]].astats.nrequests_huge, uint64_t)

 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
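
A minimal consumer-side sketch for the per-arena huge statistics wired up above (not part of this diff): it assumes a jemalloc build with statistics enabled, uses the mallctl names added by this change plus the existing epoch control, and the 8 MiB request size is merely assumed to exceed the default 4 MiB chunk size so that the allocation is served by the huge allocator; error handling is omitted.

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	void *p = mallocx((size_t)8 << 20, 0); /* Assumed to land in the huge class. */
	uint64_t epoch = 1;
	uint64_t nmalloc, nrequests;
	size_t allocated, sz;

	/* Advance the epoch so that the statistics snapshot is refreshed. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(size_t);
	mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, NULL, 0);
	sz = sizeof(uint64_t);
	mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0);
	mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, NULL, 0);

	printf("arena 0 huge: %zu bytes, %ju nmalloc, %ju nrequests\n",
	    allocated, (uintmax_t)nmalloc, (uintmax_t)nrequests);

	dallocx(p, 0);
	return (0);
}
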

src/huge.c | 120

@@ -4,11 +4,8 @@
 /******************************************************************************/
 /* Data. */

-uint64_t	huge_nmalloc;
-uint64_t	huge_ndalloc;
-size_t		huge_allocated;
-
-malloc_mutex_t	huge_mtx;
+/* Protects chunk-related data structures. */
+static malloc_mutex_t	huge_mtx;

 /******************************************************************************/

@@ -16,15 +13,14 @@ malloc_mutex_t huge_mtx;
 static extent_tree_t huge;

 void *
-huge_malloc(arena_t *arena, size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(arena_t *arena, size_t size, bool zero)
 {

-	return (huge_palloc(arena, size, chunksize, zero, dss_prec));
+	return (huge_palloc(arena, size, chunksize, zero));
 }

 void *
-huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
-    dss_prec_t dss_prec)
+huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 {
 	void *ret;
 	size_t csize;
@@ -49,9 +45,10 @@ huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	ret = chunk_alloc(arena, csize, alignment, false, &is_zeroed, dss_prec);
+	arena = choose_arena(arena);
+	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
 	if (ret == NULL) {
-		base_node_dealloc(node);
+		base_node_dalloc(node);
 		return (NULL);
 	}

@@ -62,11 +59,6 @@ huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero,

 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-	if (config_stats) {
-		stats_cactive_add(csize);
-		huge_nmalloc++;
-		huge_allocated += csize;
-	}
 	malloc_mutex_unlock(&huge_mtx);

 	if (config_fill && zero == false) {
@@ -99,8 +91,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)

 void *
 huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc,
-    dss_prec_t dss_prec)
+    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
 {
 	void *ret;
 	size_t copysize;
@@ -115,18 +106,18 @@ huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 	 * space and copying.
 	 */
 	if (alignment > chunksize)
-		ret = huge_palloc(arena, size + extra, alignment, zero, dss_prec);
+		ret = huge_palloc(arena, size + extra, alignment, zero);
 	else
-		ret = huge_malloc(arena, size + extra, zero, dss_prec);
+		ret = huge_malloc(arena, size + extra, zero);

 	if (ret == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
 		if (alignment > chunksize)
-			ret = huge_palloc(arena, size, alignment, zero, dss_prec);
+			ret = huge_palloc(arena, size, alignment, zero);
 		else
-			ret = huge_malloc(arena, size, zero, dss_prec);
+			ret = huge_malloc(arena, size, zero);

 		if (ret == NULL)
 			return (NULL);
@@ -137,59 +128,8 @@ huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
 	 * expectation that the extra bytes will be reliably preserved.
 	 */
 	copysize = (size < oldsize) ? size : oldsize;
-#ifdef JEMALLOC_MREMAP
-	/*
-	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
-	 * source nor the destination are in dss.
-	 */
-	if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
-	    == false && chunk_in_dss(ret) == false))) {
-		size_t newsize = huge_salloc(ret);
-
-		/*
-		 * Remove ptr from the tree of huge allocations before
-		 * performing the remap operation, in order to avoid the
-		 * possibility of another thread acquiring that mapping before
-		 * this one removes it from the tree.
-		 */
-		huge_dalloc(ptr, false);
-		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
-		    ret) == MAP_FAILED) {
-			/*
-			 * Assuming no chunk management bugs in the allocator,
-			 * the only documented way an error can occur here is
-			 * if the application changed the map type for a
-			 * portion of the old allocation. This is firmly in
-			 * undefined behavior territory, so write a diagnostic
-			 * message, and optionally abort.
-			 */
-			char buf[BUFERROR_BUF];
-
-			buferror(get_errno(), buf, sizeof(buf));
-			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
-			    buf);
-			if (opt_abort)
-				abort();
-			memcpy(ret, ptr, copysize);
-			chunk_dealloc_mmap(ptr, oldsize);
-		} else if (config_fill && zero == false && opt_junk && oldsize
-		    < newsize) {
-			/*
-			 * mremap(2) clobbers the original mapping, so
-			 * junk/zero filling is not preserved. There is no
-			 * need to zero fill here, since any trailing
-			 * uninititialized memory is demand-zeroed by the
-			 * kernel, but junk filling must be redone.
-			 */
-			memset(ret + oldsize, 0xa5, newsize - oldsize);
-		}
-	} else
-#endif
-	{
-		memcpy(ret, ptr, copysize);
-		iqalloct(ptr, try_tcache_dalloc);
-	}
+	memcpy(ret, ptr, copysize);
+	iqalloct(ptr, try_tcache_dalloc);
 	return (ret);
 }

@@ -217,7 +157,7 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif

 void
-huge_dalloc(void *ptr, bool unmap)
+huge_dalloc(void *ptr)
 {
 	extent_node_t *node, key;

@@ -230,20 +170,11 @@ huge_dalloc(void *ptr, bool unmap)
 	assert(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);

-	if (config_stats) {
-		stats_cactive_sub(node->size);
-		huge_ndalloc++;
-		huge_allocated -= node->size;
-	}
-
 	malloc_mutex_unlock(&huge_mtx);

-	if (unmap)
-		huge_dalloc_junk(node->addr, node->size);
-
-	chunk_dealloc(node->arena, node->addr, node->size, unmap);
-
-	base_node_dealloc(node);
+	huge_dalloc_junk(node->addr, node->size);
+	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
+	base_node_dalloc(node);
 }

 size_t
@@ -266,13 +197,6 @@ huge_salloc(const void *ptr)
 	return (ret);
 }

-dss_prec_t
-huge_dss_prec_get(arena_t *arena)
-{
-
-	return (arena_dss_prec_get(choose_arena(arena)));
-}
-
 prof_ctx_t *
 huge_prof_ctx_get(const void *ptr)
 {
@@ -319,12 +243,6 @@ huge_boot(void)
 		return (true);
 	extent_tree_ad_new(&huge);

-	if (config_stats) {
-		huge_nmalloc = 0;
-		huge_ndalloc = 0;
-		huge_allocated = 0;
-	}
-
 	return (false);
 }

@@ -1983,7 +1983,7 @@ a0alloc(size_t size, bool zero)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arenas[0], size, zero, false));
 	else
-		return (huge_malloc(NULL, size, zero, huge_dss_prec_get(arenas[0])));
+		return (huge_malloc(NULL, size, zero));
 }

 void *
@@ -2012,7 +2012,7 @@ a0free(void *ptr)
 	if (chunk != ptr)
 		arena_dalloc(chunk, ptr, false);
 	else
-		huge_dalloc(ptr, true);
+		huge_dalloc(ptr);
 }

 /******************************************************************************/

src/stats.c | 29

@@ -213,6 +213,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
 	size_t large_allocated;
 	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+	size_t huge_allocated;
+	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;

 	CTL_GET("arenas.page", &page, size_t);

@@ -249,12 +251,19 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	malloc_cprintf(write_cb, cbopaque,
 	    "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
 	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+	CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t);
+	CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
+	malloc_cprintf(write_cb, cbopaque,
+	    "huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+	    huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
 	malloc_cprintf(write_cb, cbopaque,
 	    "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
-	    small_allocated + large_allocated,
-	    small_nmalloc + large_nmalloc,
-	    small_ndalloc + large_ndalloc,
-	    small_nrequests + large_nrequests);
+	    small_allocated + large_allocated + huge_allocated,
+	    small_nmalloc + large_nmalloc + huge_nmalloc,
+	    small_ndalloc + large_ndalloc + huge_ndalloc,
+	    small_nrequests + large_nrequests + huge_nrequests);
 	malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
 	CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
 	malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
@@ -458,8 +467,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	size_t allocated, active, mapped;
 	size_t chunks_current, chunks_high;
 	uint64_t chunks_total;
-	size_t huge_allocated;
-	uint64_t huge_nmalloc, huge_ndalloc;

 	CTL_GET("stats.cactive", &cactive, size_t *);
 	CTL_GET("stats.allocated", &allocated, size_t);
@@ -481,16 +488,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		    " %13"PRIu64" %12zu %12zu\n",
 		    chunks_total, chunks_high, chunks_current);

-		/* Print huge stats. */
-		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
-		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
-		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
-		malloc_cprintf(write_cb, cbopaque,
-		    "huge: nmalloc ndalloc allocated\n");
-		malloc_cprintf(write_cb, cbopaque,
-		    " %12"PRIu64" %12"PRIu64" %12zu\n",
-		    huge_nmalloc, huge_ndalloc, huge_allocated);
-
 		if (merged) {
 			unsigned narenas;

@@ -1,13 +1,13 @@
 #include "test/jemalloc_test.h"

 chunk_alloc_t *old_alloc;
-chunk_dealloc_t *old_dealloc;
+chunk_dalloc_t *old_dalloc;

 bool
-chunk_dealloc(void *chunk, size_t size, unsigned arena_ind)
+chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
 {

-	return (old_dealloc(chunk, size, arena_ind));
+	return (old_dalloc(chunk, size, arena_ind));
 }

 void *
@@ -21,11 +21,11 @@ TEST_BEGIN(test_chunk)
 {
 	void *p;
 	chunk_alloc_t *new_alloc;
-	chunk_dealloc_t *new_dealloc;
+	chunk_dalloc_t *new_dalloc;
 	size_t old_size, new_size;

 	new_alloc = chunk_alloc;
-	new_dealloc = chunk_dealloc;
+	new_dalloc = chunk_dalloc;
 	old_size = sizeof(chunk_alloc_t *);
 	new_size = sizeof(chunk_alloc_t *);

@@ -34,11 +34,9 @@ TEST_BEGIN(test_chunk)
 	    "Unexpected alloc error");
 	assert_ptr_ne(old_alloc, new_alloc,
 	    "Unexpected alloc error");
-	assert_d_eq(mallctl("arena.0.chunk.dealloc", &old_dealloc,
-	    &old_size, &new_dealloc, new_size), 0,
-	    "Unexpected dealloc error");
-	assert_ptr_ne(old_dealloc, new_dealloc,
-	    "Unexpected dealloc error");
+	assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
+	    &new_dalloc, new_size), 0, "Unexpected dalloc error");
+	assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");

 	p = mallocx(42, 0);
 	assert_ptr_ne(p, NULL, "Unexpected alloc error");
@@ -47,9 +45,8 @@ TEST_BEGIN(test_chunk)
 	assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
 	    NULL, &old_alloc, old_size), 0,
 	    "Unexpected alloc error");
-	assert_d_eq(mallctl("arena.0.chunk.dealloc", NULL,
-	    NULL, &old_dealloc, old_size), 0,
-	    "Unexpected dealloc error");
+	assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
+	    old_size), 0, "Unexpected dalloc error");
 }
 TEST_END

@@ -1,45 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_mremap)
-{
-	int err;
-	size_t sz, lg_chunk, chunksize, i;
-	char *p, *q;
-
-	sz = sizeof(lg_chunk);
-	err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);
-	assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err));
-	chunksize = ((size_t)1U) << lg_chunk;
-
-	p = (char *)malloc(chunksize);
-	assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p);
-	memset(p, 'a', chunksize);
-
-	q = (char *)realloc(p, chunksize * 2);
-	assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2,
-	    q);
-	for (i = 0; i < chunksize; i++) {
-		assert_c_eq(q[i], 'a',
-		    "realloc() should preserve existing bytes across copies");
-	}
-
-	p = q;
-
-	q = (char *)realloc(p, chunksize);
-	assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q);
-	for (i = 0; i < chunksize; i++) {
-		assert_c_eq(q[i], 'a',
-		    "realloc() should preserve existing bytes across copies");
-	}
-
-	free(q);
-}
-TEST_END
-
-int
-main(void)
-{
-
-	return (test(
-	    test_mremap));
-}
@@ -92,12 +92,9 @@ test_junk(size_t sz_min, size_t sz_max)
 			s = (char *)rallocx(s, sz+1, 0);
 			assert_ptr_not_null((void *)s,
 			    "Unexpected rallocx() failure");
-			if (!config_mremap || sz+1 <= arena_maxclass) {
-				assert_ptr_eq(most_recently_junked, junked,
-				    "Expected region of size %zu to be "
-				    "junk-filled",
-				    sz);
-			}
+			assert_ptr_eq(most_recently_junked, junked,
+			    "Expected region of size %zu to be junk-filled",
+			    sz);
 		}
 	}

@@ -129,7 +129,6 @@ TEST_BEGIN(test_mallctl_config)
 	TEST_MALLCTL_CONFIG(debug);
 	TEST_MALLCTL_CONFIG(fill);
 	TEST_MALLCTL_CONFIG(lazy_lock);
-	TEST_MALLCTL_CONFIG(mremap);
 	TEST_MALLCTL_CONFIG(munmap);
 	TEST_MALLCTL_CONFIG(prof);
 	TEST_MALLCTL_CONFIG(prof_libgcc);
@@ -60,7 +60,7 @@ TEST_BEGIN(test_stats_huge)
 	void *p;
 	uint64_t epoch;
 	size_t allocated;
-	uint64_t nmalloc, ndalloc;
+	uint64_t nmalloc, ndalloc, nrequests;
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;

@@ -71,19 +71,23 @@ TEST_BEGIN(test_stats_huge)
 	    "Unexpected mallctl() failure");

 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");

 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
 		    "allocated should be greater than zero");
 		assert_u64_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_le(nmalloc, nrequests,
+		    "nmalloc should no larger than nrequests");
 	}

 	dallocx(p, 0);