commit d4a238ccb0
Merge branch 'pr/80' into dev
INSTALL (6 lines changed)
@@ -132,12 +132,6 @@ any of the following arguments (not a definitive list) to 'configure':
     released in bulk, thus reducing the total number of mutex operations.  See
     the "opt.tcache" option for usage details.
 
---enable-mremap
-    Enable huge realloc() via mremap(2).  mremap() is disabled by default
-    because the flavor used is specific to Linux, which has a quirk in its
-    virtual memory allocation algorithm that causes semi-permanent VM map holes
-    under normal jemalloc operation.
-
 --disable-munmap
     Disable virtual memory deallocation via munmap(2); instead keep track of
     the virtual memory for later use.  munmap() is disabled by default (i.e.
Makefile.in
@@ -137,12 +137,12 @@ TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
 	$(srcroot)test/integration/allocated.c \
 	$(srcroot)test/integration/mallocx.c \
 	$(srcroot)test/integration/MALLOCX_ARENA.c \
-	$(srcroot)test/integration/mremap.c \
 	$(srcroot)test/integration/posix_memalign.c \
 	$(srcroot)test/integration/rallocx.c \
 	$(srcroot)test/integration/thread_arena.c \
 	$(srcroot)test/integration/thread_tcache_enabled.c \
-	$(srcroot)test/integration/xallocx.c
+	$(srcroot)test/integration/xallocx.c \
+	$(srcroot)test/integration/chunk.c
 TESTS_STRESS :=
 TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_STRESS)
 
configure.ac (28 lines changed)
@@ -793,33 +793,6 @@ if test "x$enable_tcache" = "x1" ; then
 fi
 AC_SUBST([enable_tcache])
 
-dnl Disable mremap() for huge realloc() by default.
-AC_ARG_ENABLE([mremap],
-  [AS_HELP_STRING([--enable-mremap], [Enable mremap(2) for huge realloc()])],
-[if test "x$enable_mremap" = "xno" ; then
-  enable_mremap="0"
-else
-  enable_mremap="1"
-fi
-],
-[enable_mremap="0"]
-)
-if test "x$enable_mremap" = "x1" ; then
-  JE_COMPILABLE([mremap(...MREMAP_FIXED...)], [
-#define _GNU_SOURCE
-#include <sys/mman.h>
-], [
-void *p = mremap((void *)0, 0, 0, MREMAP_MAYMOVE|MREMAP_FIXED, (void *)0);
-], [je_cv_mremap_fixed])
-  if test "x${je_cv_mremap_fixed}" = "xno" ; then
-    enable_mremap="0"
-  fi
-fi
-if test "x$enable_mremap" = "x1" ; then
-  AC_DEFINE([JEMALLOC_MREMAP], [ ])
-fi
-AC_SUBST([enable_mremap])
-
 dnl Enable VM deallocation via munmap() by default.
 AC_ARG_ENABLE([munmap],
 [AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
@@ -1447,7 +1420,6 @@ AC_MSG_RESULT([fill : ${enable_fill}])
 AC_MSG_RESULT([utrace : ${enable_utrace}])
 AC_MSG_RESULT([valgrind : ${enable_valgrind}])
 AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
-AC_MSG_RESULT([mremap : ${enable_mremap}])
 AC_MSG_RESULT([munmap : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
 AC_MSG_RESULT([tls : ${enable_tls}])
doc/jemalloc.xml.in
@@ -486,10 +486,11 @@ for (i = 0; i < nbins; i++) {
     <para>User objects are broken into three categories according to size:
     small, large, and huge.  Small objects are smaller than one page.  Large
     objects are smaller than the chunk size.  Huge objects are a multiple of
-    the chunk size.  Small and large objects are managed by arenas; huge
-    objects are managed separately in a single data structure that is shared by
-    all threads.  Huge objects are used by applications infrequently enough
-    that this single data structure is not a scalability issue.</para>
+    the chunk size.  Small and large objects are managed entirely by arenas;
+    huge objects are additionally aggregated in a single data structure that is
+    shared by all threads.  Huge objects are typically used by applications
+    infrequently enough that this single data structure is not a scalability
+    issue.</para>
 
     <para>Each chunk that is managed by an arena tracks its contents as runs of
     contiguous pages (unused, backing a set of small objects, or backing one
@@ -647,16 +648,6 @@ for (i = 0; i < nbins; i++) {
         during build configuration.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="config.mremap">
-        <term>
-          <mallctl>config.mremap</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-mremap</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="config.munmap">
        <term>
          <mallctl>config.munmap</mallctl>
@@ -1273,14 +1264,77 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         <listitem><para>Set the precedence of dss allocation as related to mmap
         allocation for arena <i>, or for all arenas if <i> equals
         <link
-        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.  Note
-        that even during huge allocation this setting is read from the arena
-        that would be chosen for small or large allocation so that applications
-        can depend on consistent dss versus mmap allocation regardless of
-        allocation size.  See <link
-        linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
-        settings.
-        </para></listitem>
+        linkend="arenas.narenas"><mallctl>arenas.narenas</mallctl></link>.  See
+        <link linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+        settings.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arena.i.chunk.alloc">
+        <term>
+          <mallctl>arena.<i>.chunk.alloc</mallctl>
+          (<type>chunk_alloc_t *</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Get or set the chunk allocation function for arena
+        <i>.  If setting, the chunk deallocation function should
+        also be set via <link linkend="arena.i.chunk.dalloc">
+        <mallctl>arena.<i>.chunk.dalloc</mallctl></link> to a companion
+        function that knows how to deallocate the chunks.
+        <funcprototype>
+          <funcdef>typedef void *<function>(chunk_alloc_t)</function></funcdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>size_t <parameter>alignment</parameter></paramdef>
+          <paramdef>bool *<parameter>zero</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype>
+        A chunk allocation function conforms to the <type>chunk_alloc_t</type>
+        type and upon success returns a pointer to <parameter>size</parameter>
+        bytes of memory on behalf of arena <parameter>arena_ind</parameter> such
+        that the chunk's base address is a multiple of
+        <parameter>alignment</parameter>, as well as setting
+        <parameter>*zero</parameter> to indicate whether the chunk is zeroed.
+        Upon error the function returns <constant>NULL</constant> and leaves
+        <parameter>*zero</parameter> unmodified.  The
+        <parameter>size</parameter> parameter is always a multiple of the chunk
+        size.  The <parameter>alignment</parameter> parameter is always a power
+        of two at least as large as the chunk size.  Zeroing is mandatory if
+        <parameter>*zero</parameter> is true upon function
+        entry.</para>
+
+        <para>Note that replacing the default chunk allocation function makes
+        the arena's <link
+        linkend="arena.i.dss"><mallctl>arena.<i>.dss</mallctl></link>
+        setting irrelevant.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="arena.i.chunk.dalloc">
+        <term>
+          <mallctl>arena.<i>.chunk.dalloc</mallctl>
+          (<type>chunk_dalloc_t *</type>)
+          <literal>rw</literal>
+        </term>
+        <listitem><para>Get or set the chunk deallocation function for arena
+        <i>.  If setting, the chunk deallocation function must
+        be capable of deallocating all extant chunks associated with arena
+        <i>, usually by passing unknown chunks to the deallocation
+        function that was replaced.  In practice, it is feasible to control
+        allocation for arenas created via <link
+        linkend="arenas.extend"><mallctl>arenas.extend</mallctl></link> such
+        that all chunks originate from an application-supplied chunk allocator
+        (by setting custom chunk allocation/deallocation functions just after
+        arena creation), but the automatically created arenas may have already
+        created chunks prior to the application having an opportunity to take
+        over chunk allocation.
+        <funcprototype>
+          <funcdef>typedef void <function>(chunk_dalloc_t)</function></funcdef>
+          <paramdef>void *<parameter>chunk</parameter></paramdef>
+          <paramdef>size_t <parameter>size</parameter></paramdef>
+          <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+        </funcprototype>
+        A chunk deallocation function conforms to the
+        <type>chunk_dalloc_t</type> type and deallocates a
+        <parameter>chunk</parameter> of given <parameter>size</parameter> on
+        behalf of arena <parameter>arena_ind</parameter>.</para></listitem>
       </varlistentry>
 
       <varlistentry id="arenas.narenas">
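The two mallctls documented above are the interface this change adds for application-supplied chunk allocators. Below is a minimal sketch, not part of this commit, of wiring a pair of mmap(2)-backed hook functions into a freshly created arena via arenas.extend; the hook names and the trimming strategy are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <jemalloc/jemalloc.h>

/*
 * Hypothetical chunk_alloc_t implementation: per the manual text above, size
 * is a multiple of the chunk size and alignment is a power of two at least as
 * large as the chunk size.
 */
static void *
my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
    size_t map_size = size + alignment;
    char *map = mmap(NULL, map_size, PROT_READ|PROT_WRITE,
        MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (map == MAP_FAILED)
        return (NULL);                  /* *zero left unmodified on error. */
    uintptr_t base = ((uintptr_t)map + (alignment - 1)) &
        ~((uintptr_t)alignment - 1);
    size_t lead = base - (uintptr_t)map;
    if (lead != 0)
        munmap(map, lead);              /* Trim the unused head... */
    if (map_size - lead - size != 0)    /* ...and the unused tail. */
        munmap((char *)base + size, map_size - lead - size);
    *zero = true;                       /* Fresh anonymous pages are zeroed. */
    return ((void *)base);
}

/*
 * Hypothetical companion chunk_dalloc_t; chunk_dalloc_default() in this patch
 * returns false after deallocating, so this sketch does the same on success.
 */
static bool
my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
    return (munmap(chunk, size) != 0);
}

int
main(void)
{
    chunk_alloc_t *alloc_hook = my_chunk_alloc;
    chunk_dalloc_t *dalloc_hook = my_chunk_dalloc;
    unsigned arena_ind;
    size_t sz = sizeof(arena_ind);
    char name[64];

    /* Create a fresh arena so that no chunks predate the hooks. */
    if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
        return (1);
    snprintf(name, sizeof(name), "arena.%u.chunk.alloc", arena_ind);
    mallctl(name, NULL, NULL, &alloc_hook, sizeof(alloc_hook));
    snprintf(name, sizeof(name), "arena.%u.chunk.dalloc", arena_ind);
    mallctl(name, NULL, NULL, &dalloc_hook, sizeof(dalloc_hook));

    /* Allocations directed at this arena now draw chunks from the hooks. */
    void *p = mallocx(8 * 1024 * 1024, MALLOCX_ARENA(arena_ind));
    dallocx(p, 0);
    return (0);
}

Allocations bound to other arenas are unaffected, which matches the manual's note that it is only practical to take over chunk allocation for arenas created after the hooks can be installed.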
@@ -1545,39 +1599,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>
 
-      <varlistentry id="stats.huge.allocated">
-        <term>
-          <mallctl>stats.huge.allocated</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Number of bytes currently allocated by huge objects.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.huge.nmalloc">
-        <term>
-          <mallctl>stats.huge.nmalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of huge allocation requests.
-        </para></listitem>
-      </varlistentry>
-
-      <varlistentry id="stats.huge.ndalloc">
-        <term>
-          <mallctl>stats.huge.ndalloc</mallctl>
-          (<type>uint64_t</type>)
-          <literal>r-</literal>
-          [<option>--enable-stats</option>]
-        </term>
-        <listitem><para>Cumulative number of huge deallocation requests.
-        </para></listitem>
-      </varlistentry>
-
       <varlistentry id="stats.arenas.i.dss">
         <term>
           <mallctl>stats.arenas.<i>.dss</mallctl>
@@ -1754,6 +1775,50 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         </para></listitem>
       </varlistentry>
 
+      <varlistentry id="stats.arenas.i.huge.allocated">
+        <term>
+          <mallctl>stats.arenas.<i>.huge.allocated</mallctl>
+          (<type>size_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Number of bytes currently allocated by huge objects.
+        </para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.huge.nmalloc">
+        <term>
+          <mallctl>stats.arenas.<i>.huge.nmalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of huge allocation requests served
+        directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.huge.ndalloc">
+        <term>
+          <mallctl>stats.arenas.<i>.huge.ndalloc</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of huge deallocation requests served
+        directly by the arena.</para></listitem>
+      </varlistentry>
+
+      <varlistentry id="stats.arenas.i.huge.nrequests">
+        <term>
+          <mallctl>stats.arenas.<i>.huge.nrequests</mallctl>
+          (<type>uint64_t</type>)
+          <literal>r-</literal>
+          [<option>--enable-stats</option>]
+        </term>
+        <listitem><para>Cumulative number of huge allocation requests.
+        </para></listitem>
+      </varlistentry>
+
       <varlistentry id="stats.arenas.i.bins.j.allocated">
         <term>
           <mallctl>stats.arenas.<i>.bins.<j>.allocated</mallctl>
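The per-arena huge statistics added above supersede the global stats.huge.* mallctls that later file sections remove. A short sketch, not from this commit, of reading them with the usual epoch-refresh idiom; the arena index and output formatting are illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Print the huge-object statistics for one arena (requires --enable-stats). */
static void
print_arena_huge_stats(unsigned arena_ind)
{
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    size_t allocated;
    uint64_t nmalloc, ndalloc;
    char name[80];

    /* Advance the epoch so cached statistics are refreshed before reading. */
    mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

    snprintf(name, sizeof(name), "stats.arenas.%u.huge.allocated", arena_ind);
    sz = sizeof(allocated);
    mallctl(name, &allocated, &sz, NULL, 0);

    snprintf(name, sizeof(name), "stats.arenas.%u.huge.nmalloc", arena_ind);
    sz = sizeof(nmalloc);
    mallctl(name, &nmalloc, &sz, NULL, 0);

    snprintf(name, sizeof(name), "stats.arenas.%u.huge.ndalloc", arena_ind);
    sz = sizeof(ndalloc);
    mallctl(name, &ndalloc, &sz, NULL, 0);

    printf("arena %u: %zu huge bytes, %" PRIu64 " nmalloc, %" PRIu64 " ndalloc\n",
        arena_ind, allocated, nmalloc, ndalloc);
}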
include/jemalloc/internal/arena.h
@@ -345,7 +345,7 @@ struct arena_s {
 	 */
 	arena_chunk_t *spare;
 
-	/* Number of pages in active runs. */
+	/* Number of pages in active runs and huge regions. */
 	size_t nactive;
 
 	/*
@@ -370,6 +370,12 @@ struct arena_s {
 	 */
 	arena_avail_tree_t runs_avail;
 
+	/*
+	 * user-configureable chunk allocation and deallocation functions.
+	 */
+	chunk_alloc_t *chunk_alloc;
+	chunk_dalloc_t *chunk_dalloc;
+
 	/* bins is used to store trees of free regions. */
 	arena_bin_t bins[NBINS];
 };
@@ -397,6 +403,9 @@ extern arena_bin_info_t arena_bin_info[NBINS];
 /* Number of large size classes. */
 #define nlclasses (chunk_npages - map_bias)
 
+void *arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
+    bool *zero);
+void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size);
 void arena_purge_all(arena_t *arena);
 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
     size_t binind, uint64_t prof_accumbytes);
include/jemalloc/internal/base.h
@@ -12,7 +12,7 @@
 void *base_alloc(size_t size);
 void *base_calloc(size_t number, size_t size);
 extent_node_t *base_node_alloc(void);
-void base_node_dealloc(extent_node_t *node);
+void base_node_dalloc(extent_node_t *node);
 bool base_boot(void);
 void base_prefork(void);
 void base_postfork_parent(void);
include/jemalloc/internal/chunk.h
@@ -43,10 +43,14 @@ extern size_t chunk_npages;
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
-    dss_prec_t dss_prec);
+void *chunk_alloc_base(size_t size);
+void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
+    chunk_dalloc_t *chunk_dalloc, unsigned arena_ind, size_t size,
+    size_t alignment, bool *zero);
+void *chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+    unsigned arena_ind);
 void chunk_unmap(void *chunk, size_t size);
-void chunk_dealloc(void *chunk, size_t size, bool unmap);
+bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
 bool chunk_boot(void);
 void chunk_prefork(void);
 void chunk_postfork_parent(void);
include/jemalloc/internal/chunk_mmap.h
@@ -12,7 +12,7 @@
 bool pages_purge(void *addr, size_t length);
 
 void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
-bool chunk_dealloc_mmap(void *chunk, size_t size);
+bool chunk_dalloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
include/jemalloc/internal/ctl.h
@@ -57,11 +57,6 @@ struct ctl_stats_s {
 		uint64_t total;    /* stats_chunks.nchunks */
 		size_t high;       /* stats_chunks.highchunks */
 	} chunks;
-	struct {
-		size_t allocated;  /* huge_allocated */
-		uint64_t nmalloc;  /* huge_nmalloc */
-		uint64_t ndalloc;  /* huge_ndalloc */
-	} huge;
 	unsigned narenas;
 	ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
 };
include/jemalloc/internal/extent.h
@@ -24,6 +24,9 @@ struct extent_node_s {
 	/* Total region size. */
 	size_t size;
 
+	/* Arena from which this extent came, if any */
+	arena_t *arena;
+
 	/* True if zero-filled; used by chunk recycling code. */
 	bool zeroed;
 };
include/jemalloc/internal/huge.h
@@ -9,28 +9,18 @@
 /******************************************************************************/
 #ifdef JEMALLOC_H_EXTERNS
 
-/* Huge allocation statistics. */
-extern uint64_t huge_nmalloc;
-extern uint64_t huge_ndalloc;
-extern size_t huge_allocated;
-
-/* Protects chunk-related data structures. */
-extern malloc_mutex_t huge_mtx;
-
-void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec);
-void *huge_palloc(size_t size, size_t alignment, bool zero,
-    dss_prec_t dss_prec);
+void *huge_malloc(arena_t *arena, size_t size, bool zero);
+void *huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
 bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra);
-void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec);
+void *huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(void *ptr, bool unmap);
+void huge_dalloc(void *ptr);
 size_t huge_salloc(const void *ptr);
-dss_prec_t huge_dss_prec_get(arena_t *arena);
 prof_ctx_t *huge_prof_ctx_get(const void *ptr);
 void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
 bool huge_boot(void);
include/jemalloc/internal/jemalloc_internal.h.in
@@ -122,13 +122,6 @@ static const bool config_prof_libunwind =
     false
 #endif
     ;
-static const bool config_mremap =
-#ifdef JEMALLOC_MREMAP
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_munmap =
 #ifdef JEMALLOC_MUNMAP
     true
@@ -702,7 +695,7 @@ imalloct(size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arena, size, false, try_tcache));
 	else
-		return (huge_malloc(size, false, huge_dss_prec_get(arena)));
+		return (huge_malloc(arena, size, false));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -719,7 +712,7 @@ icalloct(size_t size, bool try_tcache, arena_t *arena)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arena, size, true, try_tcache));
 	else
-		return (huge_malloc(size, true, huge_dss_prec_get(arena)));
+		return (huge_malloc(arena, size, true));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -745,9 +738,9 @@ ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
 			ret = arena_palloc(choose_arena(arena), usize,
 			    alignment, zero);
 		} else if (alignment <= chunksize)
-			ret = huge_malloc(usize, zero, huge_dss_prec_get(arena));
+			ret = huge_malloc(arena, usize, zero);
 		else
-			ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena));
+			ret = huge_palloc(arena, usize, alignment, zero);
 	}
 
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
@@ -829,7 +822,7 @@ idalloct(void *ptr, bool try_tcache)
 	if (chunk != ptr)
 		arena_dalloc(chunk, ptr, try_tcache);
 	else
-		huge_dalloc(ptr, true);
+		huge_dalloc(ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -915,8 +908,8 @@ iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
 		    alignment, zero, try_tcache_alloc,
 		    try_tcache_dalloc));
 	} else {
-		return (huge_ralloc(ptr, oldsize, size, extra,
-		    alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena)));
+		return (huge_ralloc(arena, ptr, oldsize, size, extra,
+		    alignment, zero, try_tcache_dalloc));
 	}
 }
 
|
|||||||
*/
|
*/
|
||||||
#undef JEMALLOC_MUNMAP
|
#undef JEMALLOC_MUNMAP
|
||||||
|
|
||||||
/*
|
|
||||||
* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is
|
|
||||||
* disabled by default because it is Linux-specific and it will cause virtual
|
|
||||||
* memory map holes, much like munmap(2) does.
|
|
||||||
*/
|
|
||||||
#undef JEMALLOC_MREMAP
|
|
||||||
|
|
||||||
/* TLS is used to map arenas and magazine caches to threads. */
|
/* TLS is used to map arenas and magazine caches to threads. */
|
||||||
#undef JEMALLOC_TLS
|
#undef JEMALLOC_TLS
|
||||||
|
|
||||||
|
include/jemalloc/internal/private_symbols.txt
@@ -5,6 +5,8 @@ arena_alloc_junk_small
 arena_bin_index
 arena_bin_info
 arena_boot
+arena_chunk_alloc_huge
+arena_chunk_dalloc_huge
 arena_dalloc
 arena_dalloc_bin
 arena_dalloc_bin_locked
@@ -86,7 +88,7 @@ base_alloc
 base_boot
 base_calloc
 base_node_alloc
-base_node_dealloc
+base_node_dalloc
 base_postfork_child
 base_postfork_parent
 base_prefork
@@ -103,12 +105,14 @@ bt_init
 buferror
 choose_arena
 choose_arena_hard
-chunk_alloc
+chunk_alloc_arena
+chunk_alloc_base
+chunk_alloc_default
 chunk_alloc_dss
 chunk_alloc_mmap
 chunk_boot
-chunk_dealloc
-chunk_dealloc_mmap
+chunk_dalloc_default
+chunk_dalloc_mmap
 chunk_dss_boot
 chunk_dss_postfork_child
 chunk_dss_postfork_parent
@@ -197,9 +201,7 @@ huge_allocated
 huge_boot
 huge_dalloc
 huge_dalloc_junk
-huge_dss_prec_get
 huge_malloc
-huge_mtx
 huge_ndalloc
 huge_nmalloc
 huge_palloc
include/jemalloc/internal/stats.h
@@ -101,6 +101,11 @@ struct arena_stats_s {
 	uint64_t ndalloc_large;
 	uint64_t nrequests_large;
 
+	size_t allocated_huge;
+	uint64_t nmalloc_huge;
+	uint64_t ndalloc_huge;
+	uint64_t nrequests_huge;
+
 	/*
 	 * One element for each possible size class, including sizes that
 	 * overlap with bin size classes.  This is necessary because ipalloc()
include/jemalloc/jemalloc_protos.h.in
@@ -44,3 +44,6 @@ JEMALLOC_EXPORT void * @je_@memalign(size_t alignment, size_t size)
 #ifdef JEMALLOC_OVERRIDE_VALLOC
 JEMALLOC_EXPORT void * @je_@valloc(size_t size) JEMALLOC_ATTR(malloc);
 #endif
+
+typedef void *(chunk_alloc_t)(size_t, size_t, bool *, unsigned);
+typedef bool (chunk_dalloc_t)(void *, size_t, unsigned);
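The two typedefs above are the only public-header change; applications use them through the arena.<i>.chunk.{alloc,dalloc} mallctls. Because those controls are read/write, a replacement hook can capture the previously installed function and forward to it, which is the pattern the manual recommends for deallocating chunks that predate the override. The wrapper below is a sketch under that assumption, not code from this commit; the counters and names are illustrative.

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Previously installed hooks, captured when the wrappers are installed. */
static chunk_alloc_t *orig_chunk_alloc;
static chunk_dalloc_t *orig_chunk_dalloc;

/* Crude counters; a real implementation would make these thread-safe. */
static size_t chunks_allocated, chunks_deallocated;

static void *
counting_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
{
    chunks_allocated++;
    return (orig_chunk_alloc(size, alignment, zero, arena_ind));
}

static bool
counting_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
{
    chunks_deallocated++;
    /* Forward every chunk, including pre-existing ones, to the original. */
    return (orig_chunk_dalloc(chunk, size, arena_ind));
}

/*
 * Install the wrappers on arena 0; each mallctl reads the old function
 * pointer and writes the new one in a single call.
 */
static int
install_counting_hooks(void)
{
    chunk_alloc_t *new_alloc = counting_chunk_alloc;
    chunk_dalloc_t *new_dalloc = counting_chunk_dalloc;
    size_t len = sizeof(orig_chunk_alloc);

    if (mallctl("arena.0.chunk.alloc", &orig_chunk_alloc, &len,
        &new_alloc, sizeof(new_alloc)) != 0)
        return (1);
    len = sizeof(orig_chunk_dalloc);
    return (mallctl("arena.0.chunk.dalloc", &orig_chunk_dalloc, &len,
        &new_dalloc, sizeof(new_dalloc)));
}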
src/arena.c (113 lines changed)
@@ -559,6 +559,65 @@ arena_chunk_init_spare(arena_t *arena)
 	return (chunk);
 }
 
+static arena_chunk_t *
+arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment,
+    bool *zero)
+{
+	arena_chunk_t *chunk;
+	chunk_alloc_t *chunk_alloc;
+	chunk_dalloc_t *chunk_dalloc;
+
+	chunk_alloc = arena->chunk_alloc;
+	chunk_dalloc = arena->chunk_dalloc;
+	malloc_mutex_unlock(&arena->lock);
+	chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc,
+	    arena->ind, size, alignment, zero);
+	malloc_mutex_lock(&arena->lock);
+	if (config_stats && chunk != NULL)
+		arena->stats.mapped += chunksize;
+
+	return (chunk);
+}
+
+void *
+arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment,
+    bool *zero)
+{
+	void *ret;
+	chunk_alloc_t *chunk_alloc;
+	chunk_dalloc_t *chunk_dalloc;
+
+	malloc_mutex_lock(&arena->lock);
+	chunk_alloc = arena->chunk_alloc;
+	chunk_dalloc = arena->chunk_dalloc;
+	if (config_stats) {
+		/* Optimistically update stats prior to unlocking. */
+		arena->stats.mapped += size;
+		arena->stats.allocated_huge += size;
+		arena->stats.nmalloc_huge++;
+		arena->stats.nrequests_huge++;
+	}
+	arena->nactive += (size >> LG_PAGE);
+	malloc_mutex_unlock(&arena->lock);
+
+	ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind,
+	    size, alignment, zero);
+	if (config_stats) {
+		if (ret != NULL)
+			stats_cactive_add(size);
+		else {
+			/* Revert optimistic stats updates. */
+			malloc_mutex_lock(&arena->lock);
+			arena->stats.mapped -= size;
+			arena->stats.allocated_huge -= size;
+			arena->stats.nmalloc_huge--;
+			malloc_mutex_unlock(&arena->lock);
+		}
+	}
+
+	return (ret);
+}
+
 static arena_chunk_t *
 arena_chunk_init_hard(arena_t *arena)
 {
@@ -569,14 +628,9 @@ arena_chunk_init_hard(arena_t *arena)
 	assert(arena->spare == NULL);
 
 	zero = false;
-	malloc_mutex_unlock(&arena->lock);
-	chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
-	    &zero, arena->dss_prec);
-	malloc_mutex_lock(&arena->lock);
+	chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero);
 	if (chunk == NULL)
 		return (NULL);
-	if (config_stats)
-		arena->stats.mapped += chunksize;
 
 	chunk->arena = arena;
 
@@ -645,7 +699,38 @@ arena_chunk_alloc(arena_t *arena)
 }
 
 static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk)
+{
+	chunk_dalloc_t *chunk_dalloc;
+
+	chunk_dalloc = arena->chunk_dalloc;
+	malloc_mutex_unlock(&arena->lock);
+	chunk_dalloc((void *)chunk, chunksize, arena->ind);
+	malloc_mutex_lock(&arena->lock);
+	if (config_stats)
+		arena->stats.mapped -= chunksize;
+}
+
+void
+arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size)
+{
+	chunk_dalloc_t *chunk_dalloc;
+
+	malloc_mutex_lock(&arena->lock);
+	chunk_dalloc = arena->chunk_dalloc;
+	if (config_stats) {
+		arena->stats.mapped -= size;
+		arena->stats.allocated_huge -= size;
+		arena->stats.ndalloc_huge++;
+		stats_cactive_sub(size);
+	}
+	arena->nactive -= (size >> LG_PAGE);
+	malloc_mutex_unlock(&arena->lock);
+	chunk_dalloc(chunk, size, arena->ind);
+}
+
+static void
+arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 {
 	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
 	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -667,11 +752,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 		arena_chunk_t *spare = arena->spare;
 
 		arena->spare = chunk;
-		malloc_mutex_unlock(&arena->lock);
-		chunk_dealloc((void *)spare, chunksize, true);
-		malloc_mutex_lock(&arena->lock);
-		if (config_stats)
-			arena->stats.mapped -= chunksize;
+		arena_chunk_dalloc_internal(arena, spare);
 	} else
 		arena->spare = chunk;
 }
@@ -1231,7 +1312,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
 	if (size == arena_maxclass) {
 		assert(run_ind == map_bias);
 		assert(run_pages == (arena_maxclass >> LG_PAGE));
-		arena_chunk_dealloc(arena, chunk);
+		arena_chunk_dalloc(arena, chunk);
 	}
 
 	/*
@@ -2283,6 +2364,10 @@ arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
 	astats->nmalloc_large += arena->stats.nmalloc_large;
 	astats->ndalloc_large += arena->stats.ndalloc_large;
 	astats->nrequests_large += arena->stats.nrequests_large;
+	astats->allocated_huge += arena->stats.allocated_huge;
+	astats->nmalloc_huge += arena->stats.nmalloc_huge;
+	astats->ndalloc_huge += arena->stats.ndalloc_huge;
+	astats->nrequests_huge += arena->stats.nrequests_huge;
 
 	for (i = 0; i < nlclasses; i++) {
 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
@@ -2319,6 +2404,8 @@ arena_new(arena_t *arena, unsigned ind)
 
 	arena->ind = ind;
 	arena->nthreads = 0;
+	arena->chunk_alloc = chunk_alloc_default;
+	arena->chunk_dalloc = chunk_dalloc_default;
 
 	if (malloc_mutex_init(&arena->lock))
 		return (true);
src/base.c (12 lines changed)
@@ -16,24 +16,16 @@ static void *base_next_addr;
 static void *base_past_addr; /* Addr immediately past base_pages. */
 static extent_node_t *base_nodes;
 
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static bool base_pages_alloc(size_t minsize);
-
 /******************************************************************************/
 
 static bool
 base_pages_alloc(size_t minsize)
 {
 	size_t csize;
-	bool zero;
 
 	assert(minsize != 0);
 	csize = CHUNK_CEILING(minsize);
-	zero = false;
-	base_pages = chunk_alloc(csize, chunksize, true, &zero,
-	    chunk_dss_prec_get());
+	base_pages = chunk_alloc_base(csize);
 	if (base_pages == NULL)
 		return (true);
 	base_next_addr = base_pages;
@@ -100,7 +92,7 @@ base_node_alloc(void)
 }
 
 void
-base_node_dealloc(extent_node_t *node)
+base_node_dalloc(extent_node_t *node)
 {
 
 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
src/chunk.c (113 lines changed)
@@ -31,13 +31,12 @@ size_t map_bias;
 size_t arena_maxclass; /* Max size class for arenas. */
 
 /******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
 
-static void *chunk_recycle(extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
-    bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_dalloc_core(void *chunk, size_t size);
 
 /******************************************************************************/
 
@@ -104,7 +103,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 		malloc_mutex_unlock(&chunks_mtx);
 		node = base_node_alloc();
 		if (node == NULL) {
-			chunk_dealloc(ret, size, true);
+			chunk_dalloc_core(ret, size);
 			return (NULL);
 		}
 		malloc_mutex_lock(&chunks_mtx);
@@ -119,7 +118,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 	malloc_mutex_unlock(&chunks_mtx);
 
 	if (node != NULL)
-		base_node_dealloc(node);
+		base_node_dalloc(node);
 	if (*zero) {
 		if (zeroed == false)
 			memset(ret, 0, size);
@@ -141,8 +140,8 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
  * takes advantage of this to avoid demanding zeroed chunks, but taking
  * advantage of them if they are returned.
  */
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+static void *
+chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
     dss_prec_t dss_prec)
 {
 	void *ret;
@@ -156,34 +155,39 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
 	if (have_dss && dss_prec == dss_prec_primary) {
 		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
 		    alignment, base, zero)) != NULL)
-			goto label_return;
+			return (ret);
 		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-			goto label_return;
+			return (ret);
 	}
 	/* mmap. */
 	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
 	    alignment, base, zero)) != NULL)
-		goto label_return;
+		return (ret);
 	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
-		goto label_return;
+		return (ret);
 	/* "secondary" dss. */
 	if (have_dss && dss_prec == dss_prec_secondary) {
 		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
 		    alignment, base, zero)) != NULL)
-			goto label_return;
+			return (ret);
 		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-			goto label_return;
+			return (ret);
 	}
 
 	/* All strategies for allocation failed. */
-	ret = NULL;
-label_return:
-	if (ret != NULL) {
-		if (config_ivsalloc && base == false) {
-			if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
-				chunk_dealloc(ret, size, true);
-				return (NULL);
-			}
-		}
+	return (NULL);
+}
+
+static bool
+chunk_register(void *chunk, size_t size, bool base)
+{
+
+	assert(chunk != NULL);
+	assert(CHUNK_ADDR2BASE(chunk) == chunk);
+
+	if (config_ivsalloc && base == false) {
+		if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
+			return (true);
+	}
 	if (config_stats || config_prof) {
 		bool gdump;
@@ -203,12 +207,53 @@ label_return:
 			prof_gdump();
 	}
 	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
+	return (false);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+	void *ret;
+	bool zero;
+
+	zero = false;
+	ret = chunk_alloc_core(size, chunksize, true, &zero,
+	    chunk_dss_prec_get());
+	if (ret == NULL)
+		return (NULL);
+	if (chunk_register(ret, size, true)) {
+		chunk_dalloc_core(ret, size);
+		return (NULL);
 	}
-	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
+
+void *
+chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
+    unsigned arena_ind, size_t size, size_t alignment, bool *zero)
+{
+	void *ret;
+
+	ret = chunk_alloc(size, alignment, zero, arena_ind);
+	if (ret != NULL && chunk_register(ret, size, false)) {
+		chunk_dalloc(ret, size, arena_ind);
+		ret = NULL;
+	}
+
+	return (ret);
+}
+
+/* Default arena chunk allocation routine in the absence of user override. */
+void *
+chunk_alloc_default(size_t size, size_t alignment, bool *zero,
+    unsigned arena_ind)
+{
+
+	return (chunk_alloc_core(size, alignment, false, zero,
+	    arenas[arena_ind]->dss_prec));
+}
 
 static void
 chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
@@ -292,9 +337,9 @@ label_return:
 	 * avoid potential deadlock.
 	 */
 	if (xnode != NULL)
-		base_node_dealloc(xnode);
+		base_node_dalloc(xnode);
 	if (xprev != NULL)
-		base_node_dealloc(xprev);
+		base_node_dalloc(xprev);
 }
 
 void
@@ -307,12 +352,12 @@ chunk_unmap(void *chunk, size_t size)
 
 	if (have_dss && chunk_in_dss(chunk))
 		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
-	else if (chunk_dealloc_mmap(chunk, size))
+	else if (chunk_dalloc_mmap(chunk, size))
 		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
 }
 
-void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+static void
+chunk_dalloc_core(void *chunk, size_t size)
 {
 
 	assert(chunk != NULL);
@@ -329,10 +374,18 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 		malloc_mutex_unlock(&chunks_mtx);
 	}
 
-	if (unmap)
 	chunk_unmap(chunk, size);
 }
 
+/* Default arena chunk deallocation routine in the absence of user override. */
+bool
+chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
+{
+
+	chunk_dalloc_core(chunk, size);
+	return (false);
+}
+
 bool
 chunk_boot(void)
 {

src/chunk_mmap.c
@@ -200,7 +200,7 @@ chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
 }
 
 bool
-chunk_dealloc_mmap(void *chunk, size_t size)
+chunk_dalloc_mmap(void *chunk, size_t size)
 {
 
 	if (config_munmap)
113
src/ctl.c
113
src/ctl.c
@ -76,7 +76,6 @@ CTL_PROTO(thread_deallocatedp)
|
|||||||
CTL_PROTO(config_debug)
|
CTL_PROTO(config_debug)
|
||||||
CTL_PROTO(config_fill)
|
CTL_PROTO(config_fill)
|
||||||
CTL_PROTO(config_lazy_lock)
|
CTL_PROTO(config_lazy_lock)
|
||||||
CTL_PROTO(config_mremap)
|
|
||||||
CTL_PROTO(config_munmap)
|
CTL_PROTO(config_munmap)
|
||||||
CTL_PROTO(config_prof)
|
CTL_PROTO(config_prof)
|
||||||
CTL_PROTO(config_prof_libgcc)
|
CTL_PROTO(config_prof_libgcc)
|
||||||
@ -113,6 +112,8 @@ CTL_PROTO(opt_prof_accum)
|
|||||||
CTL_PROTO(arena_i_purge)
|
CTL_PROTO(arena_i_purge)
|
||||||
static void arena_purge(unsigned arena_ind);
|
static void arena_purge(unsigned arena_ind);
|
||||||
CTL_PROTO(arena_i_dss)
|
CTL_PROTO(arena_i_dss)
|
||||||
|
CTL_PROTO(arena_i_chunk_alloc)
|
||||||
|
CTL_PROTO(arena_i_chunk_dalloc)
|
||||||
INDEX_PROTO(arena_i)
|
INDEX_PROTO(arena_i)
|
||||||
CTL_PROTO(arenas_bin_i_size)
|
CTL_PROTO(arenas_bin_i_size)
|
||||||
CTL_PROTO(arenas_bin_i_nregs)
|
CTL_PROTO(arenas_bin_i_nregs)
|
||||||
@ -135,9 +136,6 @@ CTL_PROTO(prof_interval)
|
|||||||
CTL_PROTO(stats_chunks_current)
|
CTL_PROTO(stats_chunks_current)
|
||||||
CTL_PROTO(stats_chunks_total)
|
CTL_PROTO(stats_chunks_total)
|
||||||
CTL_PROTO(stats_chunks_high)
|
CTL_PROTO(stats_chunks_high)
|
||||||
CTL_PROTO(stats_huge_allocated)
|
|
||||||
CTL_PROTO(stats_huge_nmalloc)
|
|
||||||
CTL_PROTO(stats_huge_ndalloc)
|
|
||||||
CTL_PROTO(stats_arenas_i_small_allocated)
|
CTL_PROTO(stats_arenas_i_small_allocated)
|
||||||
CTL_PROTO(stats_arenas_i_small_nmalloc)
|
CTL_PROTO(stats_arenas_i_small_nmalloc)
|
||||||
CTL_PROTO(stats_arenas_i_small_ndalloc)
|
CTL_PROTO(stats_arenas_i_small_ndalloc)
|
||||||
@ -146,6 +144,10 @@ CTL_PROTO(stats_arenas_i_large_allocated)
|
|||||||
CTL_PROTO(stats_arenas_i_large_nmalloc)
|
CTL_PROTO(stats_arenas_i_large_nmalloc)
|
||||||
CTL_PROTO(stats_arenas_i_large_ndalloc)
|
CTL_PROTO(stats_arenas_i_large_ndalloc)
|
||||||
CTL_PROTO(stats_arenas_i_large_nrequests)
|
CTL_PROTO(stats_arenas_i_large_nrequests)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_allocated)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_nmalloc)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_ndalloc)
|
||||||
|
CTL_PROTO(stats_arenas_i_huge_nrequests)
|
||||||
CTL_PROTO(stats_arenas_i_bins_j_allocated)
|
CTL_PROTO(stats_arenas_i_bins_j_allocated)
|
||||||
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
|
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
|
||||||
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
|
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
|
||||||
@ -212,7 +214,6 @@ static const ctl_named_node_t config_node[] = {
|
|||||||
{NAME("debug"), CTL(config_debug)},
|
{NAME("debug"), CTL(config_debug)},
|
||||||
{NAME("fill"), CTL(config_fill)},
|
{NAME("fill"), CTL(config_fill)},
|
||||||
{NAME("lazy_lock"), CTL(config_lazy_lock)},
|
{NAME("lazy_lock"), CTL(config_lazy_lock)},
|
||||||
{NAME("mremap"), CTL(config_mremap)},
|
|
||||||
{NAME("munmap"), CTL(config_munmap)},
|
{NAME("munmap"), CTL(config_munmap)},
|
||||||
{NAME("prof"), CTL(config_prof)},
|
{NAME("prof"), CTL(config_prof)},
|
||||||
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
|
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
|
||||||
@ -251,9 +252,15 @@ static const ctl_named_node_t opt_node[] = {
|
|||||||
{NAME("prof_accum"), CTL(opt_prof_accum)}
|
{NAME("prof_accum"), CTL(opt_prof_accum)}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const ctl_named_node_t chunk_node[] = {
|
||||||
|
{NAME("alloc"), CTL(arena_i_chunk_alloc)},
|
||||||
|
{NAME("dalloc"), CTL(arena_i_chunk_dalloc)}
|
||||||
|
};
|
||||||
|
|
||||||
static const ctl_named_node_t arena_i_node[] = {
|
static const ctl_named_node_t arena_i_node[] = {
|
||||||
{NAME("purge"), CTL(arena_i_purge)},
|
{NAME("purge"), CTL(arena_i_purge)},
|
||||||
{NAME("dss"), CTL(arena_i_dss)}
|
{NAME("dss"), CTL(arena_i_dss)},
|
||||||
|
{NAME("chunk"), CHILD(named, chunk)},
|
||||||
};
|
};
|
||||||
static const ctl_named_node_t super_arena_i_node[] = {
|
static const ctl_named_node_t super_arena_i_node[] = {
|
||||||
{NAME(""), CHILD(named, arena_i)}
|
{NAME(""), CHILD(named, arena_i)}
|
||||||
@ -313,12 +320,6 @@ static const ctl_named_node_t stats_chunks_node[] = {
|
|||||||
{NAME("high"), CTL(stats_chunks_high)}
|
{NAME("high"), CTL(stats_chunks_high)}
|
||||||
};
|
};
|
||||||
|
|
||||||
static const ctl_named_node_t stats_huge_node[] = {
|
|
||||||
{NAME("allocated"), CTL(stats_huge_allocated)},
|
|
||||||
{NAME("nmalloc"), CTL(stats_huge_nmalloc)},
|
|
||||||
{NAME("ndalloc"), CTL(stats_huge_ndalloc)}
|
|
||||||
};
|
|
||||||
|
|
||||||
static const ctl_named_node_t stats_arenas_i_small_node[] = {
|
static const ctl_named_node_t stats_arenas_i_small_node[] = {
|
||||||
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
|
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
|
||||||
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
|
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
|
||||||
@ -333,6 +334,13 @@ static const ctl_named_node_t stats_arenas_i_large_node[] = {
|
|||||||
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
|
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const ctl_named_node_t stats_arenas_i_huge_node[] = {
|
||||||
|
{NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
|
||||||
|
{NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
|
||||||
|
{NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
|
||||||
|
{NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)},
|
||||||
|
};
|
||||||
|
|
||||||
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
|
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
|
||||||
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
|
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
|
||||||
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
|
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
|
||||||
@ -377,6 +385,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
|
|||||||
{NAME("purged"), CTL(stats_arenas_i_purged)},
|
{NAME("purged"), CTL(stats_arenas_i_purged)},
|
||||||
{NAME("small"), CHILD(named, stats_arenas_i_small)},
|
{NAME("small"), CHILD(named, stats_arenas_i_small)},
|
||||||
{NAME("large"), CHILD(named, stats_arenas_i_large)},
|
{NAME("large"), CHILD(named, stats_arenas_i_large)},
|
||||||
|
{NAME("huge"), CHILD(named, stats_arenas_i_huge)},
|
||||||
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
|
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
|
||||||
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
|
{NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
|
||||||
};
|
};
|
||||||
@ -394,7 +403,6 @@ static const ctl_named_node_t stats_node[] = {
|
|||||||
{NAME("active"), CTL(stats_active)},
|
{NAME("active"), CTL(stats_active)},
|
||||||
{NAME("mapped"), CTL(stats_mapped)},
|
{NAME("mapped"), CTL(stats_mapped)},
|
||||||
{NAME("chunks"), CHILD(named, stats_chunks)},
|
{NAME("chunks"), CHILD(named, stats_chunks)},
|
||||||
{NAME("huge"), CHILD(named, stats_huge)},
|
|
||||||
{NAME("arenas"), CHILD(indexed, stats_arenas)}
|
{NAME("arenas"), CHILD(indexed, stats_arenas)}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -492,6 +500,11 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
 	sstats->astats.nrequests_large += astats->astats.nrequests_large;
 
+	sstats->astats.allocated_huge += astats->astats.allocated_huge;
+	sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+	sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+	sstats->astats.nrequests_huge += astats->astats.nrequests_huge;
+
 	for (i = 0; i < nlclasses; i++) {
 		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
 		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
@@ -618,12 +631,6 @@ ctl_refresh(void)
 		ctl_stats.chunks.total = stats_chunks.nchunks;
 		ctl_stats.chunks.high = stats_chunks.highchunks;
 		malloc_mutex_unlock(&chunks_mtx);
-
-		malloc_mutex_lock(&huge_mtx);
-		ctl_stats.huge.allocated = huge_allocated;
-		ctl_stats.huge.nmalloc = huge_nmalloc;
-		ctl_stats.huge.ndalloc = huge_ndalloc;
-		malloc_mutex_unlock(&huge_mtx);
 	}
 
 	/*
@@ -654,10 +661,9 @@ ctl_refresh(void)
 		ctl_stats.allocated =
 		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
 		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
-		    + ctl_stats.huge.allocated;
+		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
 		ctl_stats.active =
-		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
-		    + ctl_stats.huge.allocated;
+		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
 		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
 	}
 
@@ -1132,7 +1138,6 @@ label_return:
 CTL_RO_BOOL_CONFIG_GEN(config_debug)
 CTL_RO_BOOL_CONFIG_GEN(config_fill)
 CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
 CTL_RO_BOOL_CONFIG_GEN(config_munmap)
 CTL_RO_BOOL_CONFIG_GEN(config_prof)
 CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
@@ -1368,6 +1373,57 @@ label_return:
 	return (ret);
 }
 
+static int
+arena_i_chunk_alloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+	int ret;
+	unsigned arena_ind = mib[1];
+	arena_t *arena;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+		malloc_mutex_lock(&arena->lock);
+		READ(arena->chunk_alloc, chunk_alloc_t *);
+		WRITE(arena->chunk_alloc, chunk_alloc_t *);
+	} else {
+		ret = EFAULT;
+		goto label_outer_return;
+	}
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
+static int
+arena_i_chunk_dalloc_ctl(const size_t *mib, size_t miblen, void *oldp,
+    size_t *oldlenp, void *newp, size_t newlen)
+{
+
+	int ret;
+	unsigned arena_ind = mib[1];
+	arena_t *arena;
+
+	malloc_mutex_lock(&ctl_mtx);
+	if (arena_ind < narenas_total && (arena = arenas[arena_ind]) != NULL) {
+		malloc_mutex_lock(&arena->lock);
+		READ(arena->chunk_dalloc, chunk_dalloc_t *);
+		WRITE(arena->chunk_dalloc, chunk_dalloc_t *);
+	} else {
+		ret = EFAULT;
+		goto label_outer_return;
+	}
+	ret = 0;
+label_return:
+	malloc_mutex_unlock(&arena->lock);
+label_outer_return:
+	malloc_mutex_unlock(&ctl_mtx);
+	return (ret);
+}
+
 static const ctl_named_node_t *
 arena_i_index(const size_t *mib, size_t miblen, size_t i)
 {
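
The two handlers above wire an arena's chunk allocation and deallocation hooks into the mallctl tree as arena.<i>.chunk.alloc and arena.<i>.chunk.dalloc. Below is a minimal caller-side sketch of reading and replacing the hooks for arena 0; it assumes the chunk_alloc_t/chunk_dalloc_t typedefs are visible to the application, and my_chunk_alloc, my_chunk_dalloc, and install_hooks are illustrative names, not part of this change:

	#include <stdbool.h>
	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	/* Saved copies of the previous hooks so the custom ones can delegate. */
	static chunk_alloc_t *orig_alloc;
	static chunk_dalloc_t *orig_dalloc;

	/* Illustrative pass-through hooks. */
	static void *
	my_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
	{

		return (orig_alloc(size, alignment, zero, arena_ind));
	}

	static bool
	my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
	{

		return (orig_dalloc(chunk, size, arena_ind));
	}

	/* Swap in the hooks for arena 0; a single mallctl() call reads the old
	 * function pointer and writes the new one. */
	static int
	install_hooks(void)
	{
		chunk_alloc_t *new_alloc = my_chunk_alloc;
		chunk_dalloc_t *new_dalloc = my_chunk_dalloc;
		size_t len = sizeof(chunk_alloc_t *);

		if (mallctl("arena.0.chunk.alloc", &orig_alloc, &len, &new_alloc,
		    sizeof(chunk_alloc_t *)) != 0)
			return (-1);
		len = sizeof(chunk_dalloc_t *);
		if (mallctl("arena.0.chunk.dalloc", &orig_dalloc, &len, &new_dalloc,
		    sizeof(chunk_dalloc_t *)) != 0)
			return (-1);
		return (0);
	}

The same read-and-write pattern is exercised by the new test/integration/chunk.c added later in this commit.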
@@ -1552,9 +1608,6 @@ CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
     size_t)
 CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
 CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
 
 CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
 CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
@@ -1585,6 +1638,14 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
     ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
     ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+    ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+    ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+    ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+    ctl_stats.arenas[mib[2]].astats.nrequests_huge, uint64_t)
 
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
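
Because the global stats.huge.* nodes are removed, huge-allocation counters are now read through the per-arena stats.arenas.<i>.huge.* names defined above. A small sketch of reading them for arena 0 follows; print_arena0_huge_stats is a hypothetical helper, and the epoch write only refreshes jemalloc's cached statistics before the reads:

	#include <stdint.h>
	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	static void
	print_arena0_huge_stats(void)
	{
		uint64_t epoch = 1, nmalloc, ndalloc, nrequests;
		size_t allocated, sz;

		/* Refresh cached statistics before reading them. */
		sz = sizeof(epoch);
		mallctl("epoch", &epoch, &sz, &epoch, sz);

		sz = sizeof(size_t);
		mallctl("stats.arenas.0.huge.allocated", &allocated, &sz, NULL, 0);
		sz = sizeof(uint64_t);
		mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL, 0);
		mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL, 0);
		mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz, NULL, 0);

		printf("huge: allocated=%zu nmalloc=%llu ndalloc=%llu nrequests=%llu\n",
		    allocated, (unsigned long long)nmalloc,
		    (unsigned long long)ndalloc, (unsigned long long)nrequests);
	}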
src/huge.c (115 changed lines)

@@ -4,11 +4,8 @@
 /******************************************************************************/
 /* Data. */
 
-uint64_t	huge_nmalloc;
-uint64_t	huge_ndalloc;
-size_t		huge_allocated;
-
-malloc_mutex_t	huge_mtx;
+/* Protects chunk-related data structures. */
+static malloc_mutex_t	huge_mtx;
 
 /******************************************************************************/
 
@@ -16,14 +13,14 @@ malloc_mutex_t huge_mtx;
 static extent_tree_t	huge;
 
 void *
-huge_malloc(size_t size, bool zero, dss_prec_t dss_prec)
+huge_malloc(arena_t *arena, size_t size, bool zero)
 {
 
-	return (huge_palloc(size, chunksize, zero, dss_prec));
+	return (huge_palloc(arena, size, chunksize, zero));
 }
 
 void *
-huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
+huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 {
 	void *ret;
 	size_t csize;
@@ -48,23 +45,20 @@ huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec)
 	 * it is possible to make correct junk/zero fill decisions below.
 	 */
 	is_zeroed = zero;
-	ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec);
+	arena = choose_arena(arena);
+	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
 	if (ret == NULL) {
-		base_node_dealloc(node);
+		base_node_dalloc(node);
 		return (NULL);
 	}
 
 	/* Insert node into huge. */
 	node->addr = ret;
 	node->size = csize;
+	node->arena = arena;
 
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
-	if (config_stats) {
-		stats_cactive_add(csize);
-		huge_nmalloc++;
-		huge_allocated += csize;
-	}
 	malloc_mutex_unlock(&huge_mtx);
 
 	if (config_fill && zero == false) {
@@ -96,8 +90,8 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
 }
 
 void *
-huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
-    size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec)
+huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
 {
 	void *ret;
 	size_t copysize;
@@ -112,18 +106,18 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * space and copying.
 	 */
 	if (alignment > chunksize)
-		ret = huge_palloc(size + extra, alignment, zero, dss_prec);
+		ret = huge_palloc(arena, size + extra, alignment, zero);
 	else
-		ret = huge_malloc(size + extra, zero, dss_prec);
+		ret = huge_malloc(arena, size + extra, zero);
 
 	if (ret == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
 		if (alignment > chunksize)
-			ret = huge_palloc(size, alignment, zero, dss_prec);
+			ret = huge_palloc(arena, size, alignment, zero);
 		else
-			ret = huge_malloc(size, zero, dss_prec);
+			ret = huge_malloc(arena, size, zero);
 
 		if (ret == NULL)
 			return (NULL);
@@ -134,59 +128,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * expectation that the extra bytes will be reliably preserved.
 	 */
 	copysize = (size < oldsize) ? size : oldsize;
-
-#ifdef JEMALLOC_MREMAP
-	/*
-	 * Use mremap(2) if this is a huge-->huge reallocation, and neither the
-	 * source nor the destination are in dss.
-	 */
-	if (oldsize >= chunksize && (have_dss == false || (chunk_in_dss(ptr)
-	    == false && chunk_in_dss(ret) == false))) {
-		size_t newsize = huge_salloc(ret);
-
-		/*
-		 * Remove ptr from the tree of huge allocations before
-		 * performing the remap operation, in order to avoid the
-		 * possibility of another thread acquiring that mapping before
-		 * this one removes it from the tree.
-		 */
-		huge_dalloc(ptr, false);
-		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
-		    ret) == MAP_FAILED) {
-			/*
-			 * Assuming no chunk management bugs in the allocator,
-			 * the only documented way an error can occur here is
-			 * if the application changed the map type for a
-			 * portion of the old allocation.  This is firmly in
-			 * undefined behavior territory, so write a diagnostic
-			 * message, and optionally abort.
-			 */
-			char buf[BUFERROR_BUF];
-
-			buferror(get_errno(), buf, sizeof(buf));
-			malloc_printf("<jemalloc>: Error in mremap(): %s\n",
-			    buf);
-			if (opt_abort)
-				abort();
-			memcpy(ret, ptr, copysize);
-			chunk_dealloc_mmap(ptr, oldsize);
-		} else if (config_fill && zero == false && opt_junk && oldsize
-		    < newsize) {
-			/*
-			 * mremap(2) clobbers the original mapping, so
-			 * junk/zero filling is not preserved.  There is no
-			 * need to zero fill here, since any trailing
-			 * uninititialized memory is demand-zeroed by the
-			 * kernel, but junk filling must be redone.
-			 */
-			memset(ret + oldsize, 0xa5, newsize - oldsize);
-		}
-	} else
-#endif
-	{
-		memcpy(ret, ptr, copysize);
-		iqalloct(ptr, try_tcache_dalloc);
-	}
+	memcpy(ret, ptr, copysize);
+	iqalloct(ptr, try_tcache_dalloc);
 	return (ret);
 }
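
With the mremap(2) fast path gone, a huge reallocation that cannot grow in place always takes the allocate, copy, free sequence above, so the old bytes are preserved by memcpy() rather than by page remapping. A caller-side sketch using the public rallocx() entry point; grow_buffer is an illustrative helper, not part of this change:

	#include <string.h>
	#include <jemalloc/jemalloc.h>

	/* Grow a (possibly huge) buffer; the resized object keeps its contents. */
	static void *
	grow_buffer(void *buf, size_t oldsize, size_t newsize)
	{
		void *p = rallocx(buf, newsize, 0);

		if (p == NULL)
			return (NULL);	/* buf remains valid on failure. */
		/* Bytes beyond oldsize are uninitialized unless MALLOCX_ZERO is used. */
		memset((char *)p + oldsize, 0, newsize - oldsize);
		return (p);
	}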
@@ -214,7 +157,7 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
 void
-huge_dalloc(void *ptr, bool unmap)
+huge_dalloc(void *ptr)
 {
 	extent_node_t *node, key;
 
@@ -227,20 +170,11 @@ huge_dalloc(void *ptr, bool unmap)
 	assert(node->addr == ptr);
 	extent_tree_ad_remove(&huge, node);
 
-	if (config_stats) {
-		stats_cactive_sub(node->size);
-		huge_ndalloc++;
-		huge_allocated -= node->size;
-	}
-
 	malloc_mutex_unlock(&huge_mtx);
 
-	if (unmap)
-		huge_dalloc_junk(node->addr, node->size);
-
-	chunk_dealloc(node->addr, node->size, unmap);
-
-	base_node_dealloc(node);
+	huge_dalloc_junk(node->addr, node->size);
+	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
+	base_node_dalloc(node);
 }
 
 size_t
@@ -263,13 +197,6 @@ huge_salloc(const void *ptr)
 	return (ret);
 }
 
-dss_prec_t
-huge_dss_prec_get(arena_t *arena)
-{
-
-	return (arena_dss_prec_get(choose_arena(arena)));
-}
-
 prof_ctx_t *
 huge_prof_ctx_get(const void *ptr)
 {
@@ -316,12 +243,6 @@ huge_boot(void)
 		return (true);
 	extent_tree_ad_new(&huge);
 
-	if (config_stats) {
-		huge_nmalloc = 0;
-		huge_ndalloc = 0;
-		huge_allocated = 0;
-	}
-
 	return (false);
 }
 
src/jemalloc.c

@@ -1983,7 +1983,7 @@ a0alloc(size_t size, bool zero)
 	if (size <= arena_maxclass)
 		return (arena_malloc(arenas[0], size, zero, false));
 	else
-		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
+		return (huge_malloc(NULL, size, zero));
 }
 
 void *
@@ -2012,7 +2012,7 @@ a0free(void *ptr)
 	if (chunk != ptr)
 		arena_dalloc(chunk, ptr, false);
 	else
-		huge_dalloc(ptr, true);
+		huge_dalloc(ptr);
 }
 
 /******************************************************************************/
src/stats.c (29 changed lines)

@@ -213,6 +213,8 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	uint64_t small_nmalloc, small_ndalloc, small_nrequests;
 	size_t large_allocated;
 	uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+	size_t huge_allocated;
+	uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
 
 	CTL_GET("arenas.page", &page, size_t);
 
@@ -249,12 +251,19 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	malloc_cprintf(write_cb, cbopaque,
 	    "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
 	    large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
+	CTL_I_GET("stats.arenas.0.huge.allocated", &huge_allocated, size_t);
+	CTL_I_GET("stats.arenas.0.huge.nmalloc", &huge_nmalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.huge.ndalloc", &huge_ndalloc, uint64_t);
+	CTL_I_GET("stats.arenas.0.huge.nrequests", &huge_nrequests, uint64_t);
+	malloc_cprintf(write_cb, cbopaque,
+	    "huge: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
+	    huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
 	malloc_cprintf(write_cb, cbopaque,
 	    "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
-	    small_allocated + large_allocated,
-	    small_nmalloc + large_nmalloc,
-	    small_ndalloc + large_ndalloc,
-	    small_nrequests + large_nrequests);
+	    small_allocated + large_allocated + huge_allocated,
+	    small_nmalloc + large_nmalloc + huge_nmalloc,
+	    small_ndalloc + large_ndalloc + huge_ndalloc,
+	    small_nrequests + large_nrequests + huge_nrequests);
 	malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
 	CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
 	malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
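
stats_arena_print() now emits a "huge:" row next to "small:" and "large:" in each arena summary. The easiest way to see the new row is to let jemalloc print its own statistics; the sketch below is illustrative and relies only on the documented malloc_stats_print() and mallocx()/dallocx() entry points:

	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		void *p = mallocx(8 * 1024 * 1024, 0);	/* likely a huge allocation with default chunk size */

		/* Dump allocator statistics, including the per-arena huge row, to stderr. */
		malloc_stats_print(NULL, NULL, NULL);
		dallocx(p, 0);
		return (0);
	}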
@@ -458,8 +467,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	size_t allocated, active, mapped;
 	size_t chunks_current, chunks_high;
 	uint64_t chunks_total;
-	size_t huge_allocated;
-	uint64_t huge_nmalloc, huge_ndalloc;
 
 	CTL_GET("stats.cactive", &cactive, size_t *);
 	CTL_GET("stats.allocated", &allocated, size_t);
@@ -481,16 +488,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 		    "  %13"PRIu64" %12zu %12zu\n",
 		    chunks_total, chunks_high, chunks_current);
 
-		/* Print huge stats. */
-		CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
-		CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
-		CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
-		malloc_cprintf(write_cb, cbopaque,
-		    "huge: nmalloc      ndalloc    allocated\n");
-		malloc_cprintf(write_cb, cbopaque,
-		    " %12"PRIu64" %12"PRIu64" %12zu\n",
-		    huge_nmalloc, huge_ndalloc, huge_allocated);
-
 		if (merged) {
 			unsigned narenas;
 
test/integration/chunk.c (new file, 58 lines)

@@ -0,0 +1,58 @@
+#include "test/jemalloc_test.h"
+
+chunk_alloc_t *old_alloc;
+chunk_dalloc_t *old_dalloc;
+
+bool
+chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
+{
+
+	return (old_dalloc(chunk, size, arena_ind));
+}
+
+void *
+chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
+{
+
+	return (old_alloc(size, alignment, zero, arena_ind));
+}
+
+TEST_BEGIN(test_chunk)
+{
+	void *p;
+	chunk_alloc_t *new_alloc;
+	chunk_dalloc_t *new_dalloc;
+	size_t old_size, new_size;
+
+	new_alloc = chunk_alloc;
+	new_dalloc = chunk_dalloc;
+	old_size = sizeof(chunk_alloc_t *);
+	new_size = sizeof(chunk_alloc_t *);
+
+	assert_d_eq(mallctl("arena.0.chunk.alloc", &old_alloc,
+	    &old_size, &new_alloc, new_size), 0,
+	    "Unexpected alloc error");
+	assert_ptr_ne(old_alloc, new_alloc,
+	    "Unexpected alloc error");
+	assert_d_eq(mallctl("arena.0.chunk.dalloc", &old_dalloc, &old_size,
+	    &new_dalloc, new_size), 0, "Unexpected dalloc error");
+	assert_ptr_ne(old_dalloc, new_dalloc, "Unexpected dalloc error");
+
+	p = mallocx(42, 0);
+	assert_ptr_ne(p, NULL, "Unexpected alloc error");
+	free(p);
+
+	assert_d_eq(mallctl("arena.0.chunk.alloc", NULL,
+	    NULL, &old_alloc, old_size), 0,
+	    "Unexpected alloc error");
+	assert_d_eq(mallctl("arena.0.chunk.dalloc", NULL, NULL, &old_dalloc,
+	    old_size), 0, "Unexpected dalloc error");
+}
+TEST_END
+
+int
+main(void)
+{
+
+	return (test(test_chunk));
+}
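
The test above only installs pass-through hooks; the point of the interface is that an arena's chunks can be backed by a caller-supplied mapping primitive. A hypothetical hook pair backed by anonymous mmap(2) is sketched below; the names, the alignment handling, and the assumption that the dalloc hook returns false on success are illustrative, not taken from this commit:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>

	/* Hypothetical chunk hooks backed directly by anonymous mmap(2). */
	void *
	mmap_chunk_alloc(size_t size, size_t alignment, bool *zero, unsigned arena_ind)
	{
		void *p;

		(void)arena_ind;	/* a single backing store for all arenas */
		p = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return (NULL);
		if (((uintptr_t)p & (alignment - 1)) != 0) {
			/* Not suitably aligned; a real hook would retry with padding. */
			munmap(p, size);
			return (NULL);
		}
		*zero = true;	/* fresh anonymous pages are zero-filled */
		return (p);
	}

	bool
	mmap_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
	{

		(void)arena_ind;
		/* Assumed contract: return false on successful deallocation. */
		return (munmap(chunk, size) != 0);
	}

Such hooks could then be installed through the same arena.<i>.chunk.alloc and arena.<i>.chunk.dalloc mallctls the test exercises.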
test/integration/mremap.c (deleted)

@@ -1,45 +0,0 @@
-#include "test/jemalloc_test.h"
-
-TEST_BEGIN(test_mremap)
-{
-	int err;
-	size_t sz, lg_chunk, chunksize, i;
-	char *p, *q;
-
-	sz = sizeof(lg_chunk);
-	err = mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0);
-	assert_d_eq(err, 0, "Error in mallctl(): %s", strerror(err));
-	chunksize = ((size_t)1U) << lg_chunk;
-
-	p = (char *)malloc(chunksize);
-	assert_ptr_not_null(p, "malloc(%zu) --> %p", chunksize, p);
-	memset(p, 'a', chunksize);
-
-	q = (char *)realloc(p, chunksize * 2);
-	assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize * 2,
-	    q);
-	for (i = 0; i < chunksize; i++) {
-		assert_c_eq(q[i], 'a',
-		    "realloc() should preserve existing bytes across copies");
-	}
-
-	p = q;
-
-	q = (char *)realloc(p, chunksize);
-	assert_ptr_not_null(q, "realloc(%p, %zu) --> %p", p, chunksize, q);
-	for (i = 0; i < chunksize; i++) {
-		assert_c_eq(q[i], 'a',
-		    "realloc() should preserve existing bytes across copies");
-	}
-
-	free(q);
-}
-TEST_END
-
-int
-main(void)
-{
-
-	return (test(
-	    test_mremap));
-}
test/unit/junk.c

@@ -92,14 +92,11 @@ test_junk(size_t sz_min, size_t sz_max)
 			s = (char *)rallocx(s, sz+1, 0);
 			assert_ptr_not_null((void *)s,
 			    "Unexpected rallocx() failure");
-			if (!config_mremap || sz+1 <= arena_maxclass) {
-				assert_ptr_eq(most_recently_junked, junked,
-				    "Expected region of size %zu to be "
-				    "junk-filled",
-				    sz);
-			}
+			assert_ptr_eq(most_recently_junked, junked,
+			    "Expected region of size %zu to be junk-filled",
+			    sz);
 		}
 	}
 
 	dallocx(s, 0);
 	assert_ptr_eq(most_recently_junked, (void *)s,
test/unit/mallctl.c

@@ -129,7 +129,6 @@ TEST_BEGIN(test_mallctl_config)
 	TEST_MALLCTL_CONFIG(debug);
 	TEST_MALLCTL_CONFIG(fill);
 	TEST_MALLCTL_CONFIG(lazy_lock);
-	TEST_MALLCTL_CONFIG(mremap);
 	TEST_MALLCTL_CONFIG(munmap);
 	TEST_MALLCTL_CONFIG(prof);
 	TEST_MALLCTL_CONFIG(prof_libgcc);
test/unit/stats.c

@@ -60,7 +60,7 @@ TEST_BEGIN(test_stats_huge)
 	void *p;
 	uint64_t epoch;
 	size_t allocated;
-	uint64_t nmalloc, ndalloc;
+	uint64_t nmalloc, ndalloc, nrequests;
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;
 
@@ -71,19 +71,23 @@ TEST_BEGIN(test_stats_huge)
 	    "Unexpected mallctl() failure");
 
 	sz = sizeof(size_t);
-	assert_d_eq(mallctl("stats.huge.allocated", &allocated, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.allocated", &allocated, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 	sz = sizeof(uint64_t);
-	assert_d_eq(mallctl("stats.huge.nmalloc", &nmalloc, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
-	assert_d_eq(mallctl("stats.huge.ndalloc", &ndalloc, &sz, NULL, 0),
-	    expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", &nmalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", &ndalloc, &sz, NULL,
+	    0), expected, "Unexpected mallctl() result");
+	assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", &nrequests, &sz,
+	    NULL, 0), expected, "Unexpected mallctl() result");
 
 	if (config_stats) {
 		assert_zu_gt(allocated, 0,
 		    "allocated should be greater than zero");
 		assert_u64_ge(nmalloc, ndalloc,
 		    "nmalloc should be at least as large as ndalloc");
+		assert_u64_le(nmalloc, nrequests,
+		    "nmalloc should no larger than nrequests");
 	}
 
 	dallocx(p, 0);