Remove --disable-tcache.
Simplify configuration by removing the --disable-tcache option, but replace the testing for that configuration with --with-malloc-conf=tcache:false. Fix the thread.arena and thread.tcache.flush mallctls to work correctly if tcache is disabled. This partially resolves #580.
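As an illustration of the workflow this enables (a sketch, not part of the change; it assumes a build without --with-jemalloc-prefix, so the API names are unprefixed), the tcache is now turned off at run time, e.g. through the malloc_conf symbol, and the affected mallctls are expected to degrade gracefully when it is off:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/*
 * Runtime replacement for the removed --disable-tcache configure option
 * (assumes an unprefixed jemalloc build that honors the malloc_conf symbol).
 */
const char *malloc_conf = "tcache:false";

int
main(void) {
    bool tcache;
    size_t sz = sizeof(tcache);

    /* Reports false because of the malloc_conf setting above. */
    if (mallctl("opt.tcache", &tcache, &sz, NULL, 0) == 0) {
        printf("opt.tcache: %s\n", tcache ? "true" : "false");
    }

    /* thread.arena is readable whether or not a tcache exists. */
    unsigned arena_ind;
    sz = sizeof(arena_ind);
    if (mallctl("thread.arena", &arena_ind, &sz, NULL, 0) == 0) {
        printf("thread.arena: %u\n", arena_ind);
    }

    /*
     * With this change, thread.tcache.flush fails cleanly (EFAULT) when the
     * calling thread has no tcache, instead of requiring a tcache-enabled
     * build.
     */
    if (mallctl("thread.tcache.flush", NULL, NULL, NULL, 0) != 0) {
        printf("thread.tcache.flush: no tcache to flush\n");
    }
    return 0;
}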
This commit is contained in:
parent 5aa46f027d
commit 4403c9ab44
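One API consequence visible in the diff below: the config.tcache mallctl and its CTL plumbing go away, so code that probed it to detect build-time tcache support needs a different check. A hedged sketch of that migration, using a hypothetical helper name:

#include <stdbool.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/*
 * Hypothetical helper: before this commit one could read "config.tcache" to
 * learn whether tcache support was compiled in; after it, that name no longer
 * exists and the runtime option is the thing to consult.
 */
static bool
tcache_in_use(void) {
    bool enabled;
    size_t sz = sizeof(enabled);

    if (mallctl("opt.tcache", &enabled, &sz, NULL, 0) != 0) {
        return false;   /* Treat lookup failure as "not available". */
    }
    return enabled;
}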
.travis.yml  (14 lines changed)
@@ -21,7 +21,7 @@ matrix:
     - os: linux
       env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: osx
       env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: osx
@@ -31,7 +31,7 @@ matrix:
     - os: osx
       env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: osx
-      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
       env: CC=clang CXX=clang++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
@@ -45,7 +45,7 @@ matrix:
     - os: linux
       env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=clang CXX=clang++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
       env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
@@ -65,7 +65,7 @@ matrix:
           packages:
             - gcc-multilib
     - os: linux
-      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
       addons:
         apt:
           packages:
@@ -75,13 +75,13 @@ matrix:
     - os: linux
       env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
       env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
     - os: linux
-      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-tcache" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+      env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --with-malloc-conf=tcache:false" EXTRA_CFLAGS="-Werror -Wno-array-bounds"


 before_script:
INSTALL  (5 lines changed)
@@ -153,11 +153,6 @@ any of the following arguments (not a definitive list) to 'configure':
     Statically link against the specified libunwind.a rather than dynamically
     linking with -lunwind.

---disable-tcache
-    Disable thread-specific caches for small objects. Objects are cached and
-    released in bulk, thus reducing the total number of mutex operations. See
-    the "opt.tcache" option for usage details.
-
 --disable-munmap
     Disable virtual memory deallocation via munmap(2); instead keep track of
     the virtual memory for later use. munmap() is disabled by default (i.e.
configure.ac  (17 lines changed)
@@ -1137,22 +1137,6 @@ if test "x$enable_prof" = "x1" ; then
 fi
 AC_SUBST([enable_prof])

-dnl Enable thread-specific caching by default.
-AC_ARG_ENABLE([tcache],
-  [AS_HELP_STRING([--disable-tcache], [Disable per thread caches])],
-[if test "x$enable_tcache" = "xno" ; then
-  enable_tcache="0"
-else
-  enable_tcache="1"
-fi
-],
-[enable_tcache="1"]
-)
-if test "x$enable_tcache" = "x1" ; then
-  AC_DEFINE([JEMALLOC_TCACHE], [ ])
-fi
-AC_SUBST([enable_tcache])
-
 dnl Indicate whether adjacent virtual memory mappings automatically coalesce
 dnl (and fragment on demand).
 if test "x${maps_coalesce}" = "x1" ; then
@@ -2181,7 +2165,6 @@ AC_MSG_RESULT([prof : ${enable_prof}])
 AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
 AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
 AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
-AC_MSG_RESULT([tcache : ${enable_tcache}])
 AC_MSG_RESULT([fill : ${enable_fill}])
 AC_MSG_RESULT([utrace : ${enable_utrace}])
 AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
@@ -510,13 +510,12 @@ for (i = 0; i < nbins; i++) {
     sense to reduce the number of arenas if an application does not make much
     use of the allocation functions.</para>

-    <para>In addition to multiple arenas, unless
-    <option>--disable-tcache</option> is specified during configuration, this
-    allocator supports thread-specific caching, in order to make it possible to
-    completely avoid synchronization for most allocation requests. Such caching
-    allows very fast allocation in the common case, but it increases memory
-    usage and fragmentation, since a bounded number of objects can remain
-    allocated in each thread cache.</para>
+    <para>In addition to multiple arenas, this allocator supports
+    thread-specific caching, in order to make it possible to completely avoid
+    synchronization for most allocation requests. Such caching allows very fast
+    allocation in the common case, but it increases memory usage and
+    fragmentation, since a bounded number of objects can remain allocated in
+    each thread cache.</para>

     <para>Memory is conceptually broken into extents. Extents are always
     aligned to multiples of the page size. This alignment makes it possible to
@@ -839,16 +838,6 @@ mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
         build configuration.</para></listitem>
       </varlistentry>

-      <varlistentry id="config.tcache">
-        <term>
-          <mallctl>config.tcache</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--disable-tcache</option> was not specified
-        during build configuration.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="config.tls">
         <term>
           <mallctl>config.tls</mallctl>
@@ -1095,7 +1084,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <mallctl>opt.tcache</mallctl>
           (<type>bool</type>)
           <literal>r-</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Thread-specific caching (tcache) enabled/disabled. When
         there are multiple threads, each thread uses a tcache for objects up to
@@ -1112,7 +1100,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <mallctl>opt.lg_tcache_max</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Maximum size class (log base 2) to cache in the
         thread-specific cache (tcache). At a minimum, all small size classes
@@ -1370,7 +1357,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
          <mallctl>thread.tcache.enabled</mallctl>
          (<type>bool</type>)
          <literal>rw</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Enable/disable calling thread's tcache. The tcache is
         implicitly flushed as a side effect of becoming
@@ -1384,7 +1370,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <mallctl>thread.tcache.flush</mallctl>
           (<type>void</type>)
           <literal>--</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Flush calling thread's thread-specific cache (tcache).
         This interface releases all cached objects and internal data structures
@@ -1440,7 +1425,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <mallctl>tcache.create</mallctl>
           (<type>unsigned</type>)
           <literal>r-</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Create an explicit thread-specific cache (tcache) and
         return an identifier that can be passed to the <link
@@ -1457,7 +1441,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <mallctl>tcache.flush</mallctl>
           (<type>unsigned</type>)
           <literal>-w</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Flush the specified thread-specific cache (tcache). The
         same considerations apply to this interface as to <link
@@ -1471,7 +1454,6 @@ malloc_conf = "xmalloc:true";]]></programlisting>
           <mallctl>tcache.destroy</mallctl>
           (<type>unsigned</type>)
           <literal>-w</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Flush the specified thread-specific cache (tcache) and
         make the identifier available for use during a future tcache creation.
@@ -1873,7 +1855,6 @@ struct extent_hooks_s {
           <mallctl>arenas.tcache_max</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Maximum thread-cached size class.</para></listitem>
       </varlistentry>
@@ -1892,7 +1873,6 @@ struct extent_hooks_s {
           <mallctl>arenas.nhbins</mallctl>
           (<type>unsigned</type>)
           <literal>r-</literal>
-          [<option>--enable-tcache</option>]
         </term>
         <listitem><para>Total number of thread cache bin size
         classes.</para></listitem>
@@ -2575,7 +2555,6 @@ struct extent_hooks_s {
           <mallctl>stats.arenas.<i>.bins.<j>.nfills</mallctl>
           (<type>uint64_t</type>)
           <literal>r-</literal>
-          [<option>--enable-stats</option> <option>--enable-tcache</option>]
         </term>
         <listitem><para>Cumulative number of tcache fills.</para></listitem>
       </varlistentry>
@@ -2585,7 +2564,6 @@ struct extent_hooks_s {
           <mallctl>stats.arenas.<i>.bins.<j>.nflushes</mallctl>
           (<type>uint64_t</type>)
           <literal>r-</literal>
-          [<option>--enable-stats</option> <option>--enable-tcache</option>]
         </term>
         <listitem><para>Cumulative number of tcache flushes.</para></listitem>
       </varlistentry>
@@ -58,7 +58,7 @@ percpu_arena_update(tsd_t *tsd, unsigned cpu) {
         /* Set new arena/tcache associations. */
         arena_migrate(tsd, oldind, newind);
         tcache_t *tcache = tcache_get(tsd);
-        if (config_tcache && tcache) {
+        if (tcache != NULL) {
             tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
                 newarena);
         }
@@ -154,13 +154,6 @@
 /* Use gcc intrinsics for profile backtracing if defined. */
 #undef JEMALLOC_PROF_GCC

-/*
- * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects.
- * This makes it possible to allocate/deallocate objects without any locking
- * when the cache is in the steady state.
- */
-#undef JEMALLOC_TCACHE
-
 /*
  * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
  * segment (DSS).
@@ -323,7 +323,8 @@ malloc_getcpu(void) {
 JEMALLOC_ALWAYS_INLINE unsigned
 percpu_arena_choose(void) {
     unsigned arena_ind;
-    assert(have_percpu_arena && (percpu_arena_mode != percpu_arena_disabled));
+    assert(have_percpu_arena && (percpu_arena_mode !=
+        percpu_arena_disabled));

     malloc_cpuid_t cpuid = malloc_getcpu();
     assert(cpuid >= 0);
@@ -420,19 +421,16 @@ tcache_large_bin_get(tcache_t *tcache, szind_t binind) {

 JEMALLOC_ALWAYS_INLINE bool
 tcache_available(tsd_t *tsd) {
-    cassert(config_tcache);
-
     /*
      * Thread specific auto tcache might be unavailable if: 1) during tcache
      * initialization, or 2) disabled through thread.tcache.enabled mallctl
      * or config options. This check covers all cases.
      */
-    if (likely(tsd_tcache_enabled_get(tsd) == true)) {
-        /* Associated arena == null implies tcache init in progress. */
-        if (tsd_tcachep_get(tsd)->arena != NULL) {
-            assert(tcache_small_bin_get(tsd_tcachep_get(tsd),
-                0)->avail != NULL);
-        }
+    if (likely(tsd_tcache_enabled_get(tsd))) {
+        /* Associated arena == NULL implies tcache init in progress. */
+        assert(tsd_tcachep_get(tsd)->arena == NULL ||
+            tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
+            NULL);
         return true;
     }

@@ -441,9 +439,6 @@ tcache_available(tsd_t *tsd) {

 JEMALLOC_ALWAYS_INLINE tcache_t *
 tcache_get(tsd_t *tsd) {
-    if (!config_tcache) {
-        return NULL;
-    }
     if (!tcache_available(tsd)) {
         return NULL;
     }
@@ -24,7 +24,7 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
     if (unlikely(ret == NULL)) {
         ret = arena_choose_hard(tsd, internal);
         assert(ret);
-        if (config_tcache && tcache_available(tsd)) {
+        if (tcache_available(tsd)) {
             tcache_t *tcache = tcache_get(tsd);
             if (tcache->arena != NULL) {
                 /* See comments in tcache_data_init().*/
@@ -111,13 +111,6 @@ static const bool config_stats =
     false
 #endif
     ;
-static const bool config_tcache =
-#ifdef JEMALLOC_TCACHE
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_tls =
 #ifdef JEMALLOC_TLS
     true
@@ -6,7 +6,6 @@

 #ifndef JEMALLOC_ENABLE_INLINE
 void tcache_event(tsd_t *tsd, tcache_t *tcache);
-void tcache_flush(void);
 bool tcache_enabled_get(tsd_t *tsd);
 tcache_t *tcache_get(tsd_t *tsd);
 void tcache_enabled_set(tsd_t *tsd, bool enabled);
@@ -25,15 +24,11 @@ tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
 JEMALLOC_INLINE bool
 tcache_enabled_get(tsd_t *tsd) {
-    cassert(config_tcache);
-
     return tsd_tcache_enabled_get(tsd);
 }

 JEMALLOC_INLINE void
 tcache_enabled_set(tsd_t *tsd, bool enabled) {
-    cassert(config_tcache);
-
     bool was_enabled = tsd_tcache_enabled_get(tsd);

     if (!was_enabled && enabled) {
@@ -40,23 +40,14 @@ struct tcache_s {
      * element of tbins is initialized to point to the proper offset within
      * this array.
      */
-#ifdef JEMALLOC_TCACHE
     tcache_bin_t    tbins_small[NBINS];
-#else
-    tcache_bin_t    tbins_small[0];
-#endif
     /* Data accessed less often below. */
     ql_elm(tcache_t) link;          /* Used for aggregating stats. */
     arena_t         *arena;         /* Associated arena. */
     szind_t         next_gc_bin;    /* Next bin to GC. */
-#ifdef JEMALLOC_TCACHE
     /* For small bins, fill (ncached_max >> lg_fill_div). */
     uint8_t         lg_fill_div[NBINS];
     tcache_bin_t    tbins_large[NSIZES-NBINS];
-#else
-    uint8_t         lg_fill_div[0];
-    tcache_bin_t    tbins_large[0];
-#endif
 };

 /* Linkage for list of available (previously used) explicit tcache IDs. */
@@ -18,7 +18,7 @@ possible_config_opts = [
     '--enable-debug',
     '--enable-prof',
     '--disable-stats',
-    '--disable-tcache',
+    '--with-malloc-conf=tcache:false',
 ]

 print 'set -e'
@@ -24,11 +24,11 @@ script:

 # The 'default' configuration is gcc, on linux, with no compiler or configure
 # flags. We also test with clang, -m32, --enable-debug, --enable-prof,
-# --disable-stats, and --disable-tcache. To avoid abusing travis though, we
-# don't test all 2**7 = 128 possible combinations of these; instead, we only
-# test combinations of up to 2 'unusual' settings, under the hope that bugs
-# involving interactions of such settings are rare.
-# things at once, for C(7, 0) + C(7, 1) + C(7, 2) = 29
+# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
+# travis though, we don't test all 2**7 = 128 possible combinations of these;
+# instead, we only test combinations of up to 2 'unusual' settings, under the
+# hope that bugs involving interactions of such settings are rare.
+# Things at once, for C(7, 0) + C(7, 1) + C(7, 2) = 29
 MAX_UNUSUAL_OPTIONS = 2

 os_default = 'linux'
@@ -40,7 +40,10 @@ compilers_unusual = 'CC=clang CXX=clang++'
 compiler_flag_unusuals = ['-m32']

 configure_flag_unusuals = [
-    '--enable-debug', '--enable-prof', '--disable-stats', '--disable-tcache',
+    '--enable-debug',
+    '--enable-prof',
+    '--disable-stats',
+    '--with-malloc-conf=tcache:false',
 ]

 all_unusuals = (
src/arena.c  (56 lines changed)
@@ -283,31 +283,27 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,

     arena_stats_unlock(tsdn, &arena->stats);

-    if (config_tcache) {
-        tcache_bin_t *tbin;
-        tcache_t *tcache;
-
-        /* tcache_bytes counts currently cached bytes. */
-        atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
-        malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
-        ql_foreach(tcache, &arena->tcache_ql, link) {
-            szind_t i = 0;
-            for (; i < NBINS; i++) {
-                tbin = tcache_small_bin_get(tcache, i);
-                arena_stats_accum_zu(&astats->tcache_bytes,
-                    tbin->ncached * index2size(i));
-            }
-            for (; i < nhbins; i++) {
-                tbin = tcache_large_bin_get(tcache, i);
-                arena_stats_accum_zu(&astats->tcache_bytes,
-                    tbin->ncached * index2size(i));
-            }
-        }
-        malloc_mutex_prof_read(tsdn,
-            &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
-            &arena->tcache_ql_mtx);
-        malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
-    }
+    /* tcache_bytes counts currently cached bytes. */
+    atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+    malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+    tcache_t *tcache;
+    ql_foreach(tcache, &arena->tcache_ql, link) {
+        szind_t i = 0;
+        for (; i < NBINS; i++) {
+            tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+            arena_stats_accum_zu(&astats->tcache_bytes,
+                tbin->ncached * index2size(i));
+        }
+        for (; i < nhbins; i++) {
+            tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+            arena_stats_accum_zu(&astats->tcache_bytes,
+                tbin->ncached * index2size(i));
+        }
+    }
+    malloc_mutex_prof_read(tsdn,
+        &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
+        &arena->tcache_ql_mtx);
+    malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
     malloc_mutex_lock(tsdn, &arena->mtx); \
@@ -342,10 +338,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
         bstats[i].ndalloc += bin->stats.ndalloc;
         bstats[i].nrequests += bin->stats.nrequests;
         bstats[i].curregs += bin->stats.curregs;
-        if (config_tcache) {
-            bstats[i].nfills += bin->stats.nfills;
-            bstats[i].nflushes += bin->stats.nflushes;
-        }
+        bstats[i].nfills += bin->stats.nfills;
+        bstats[i].nflushes += bin->stats.nflushes;
         bstats[i].nslabs += bin->stats.nslabs;
         bstats[i].reslabs += bin->stats.reslabs;
         bstats[i].curslabs += bin->stats.curslabs;
@@ -1867,9 +1861,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
         if (arena_stats_init(tsdn, &arena->stats)) {
             goto label_error;
         }
-    }

-    if (config_stats && config_tcache) {
         ql_new(&arena->tcache_ql);
         if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
             WITNESS_RANK_TCACHE_QL)) {
@@ -2007,7 +1999,7 @@ arena_prefork0(tsdn_t *tsdn, arena_t *arena) {

 void
 arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
-    if (config_stats && config_tcache) {
+    if (config_stats) {
         malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
     }
 }
@@ -2056,7 +2048,7 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
     extents_postfork_parent(tsdn, &arena->extents_retained);
     malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
     malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
-    if (config_stats && config_tcache) {
+    if (config_stats) {
         malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
     }
 }
@@ -2076,7 +2068,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
     extents_postfork_child(tsdn, &arena->extents_retained);
     malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
    malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
-    if (config_stats && config_tcache) {
+    if (config_stats) {
         malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
     }
 }
src/ctl.c  (66 lines changed)
@@ -70,7 +70,6 @@ CTL_PROTO(config_prof)
 CTL_PROTO(config_prof_libgcc)
 CTL_PROTO(config_prof_libunwind)
 CTL_PROTO(config_stats)
-CTL_PROTO(config_tcache)
 CTL_PROTO(config_tls)
 CTL_PROTO(config_utrace)
 CTL_PROTO(config_xmalloc)
@@ -255,7 +254,6 @@ static const ctl_named_node_t config_node[] = {
     {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
     {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
     {NAME("stats"), CTL(config_stats)},
-    {NAME("tcache"), CTL(config_tcache)},
     {NAME("tls"), CTL(config_tls)},
     {NAME("utrace"), CTL(config_utrace)},
     {NAME("xmalloc"), CTL(config_xmalloc)}
@@ -777,10 +775,8 @@ ARENA_PROF_MUTEXES
         accum_arena_stats_u64(&sdstats->astats.nrequests_large,
             &astats->astats.nrequests_large);

-        if (config_tcache) {
-            accum_atomic_zu(&sdstats->astats.tcache_bytes,
-                &astats->astats.tcache_bytes);
-        }
+        accum_atomic_zu(&sdstats->astats.tcache_bytes,
+            &astats->astats.tcache_bytes);

         for (i = 0; i < NBINS; i++) {
             sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
@@ -793,12 +789,9 @@ ARENA_PROF_MUTEXES
             } else {
                 assert(astats->bstats[i].curregs == 0);
             }
-            if (config_tcache) {
-                sdstats->bstats[i].nfills +=
-                    astats->bstats[i].nfills;
-                sdstats->bstats[i].nflushes +=
-                    astats->bstats[i].nflushes;
-            }
+            sdstats->bstats[i].nfills += astats->bstats[i].nfills;
+            sdstats->bstats[i].nflushes +=
+                astats->bstats[i].nflushes;
             sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
             sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
             if (!destroyed) {
@@ -1457,7 +1450,6 @@ CTL_RO_CONFIG_GEN(config_prof, bool)
 CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
 CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
 CTL_RO_CONFIG_GEN(config_stats, bool)
-CTL_RO_CONFIG_GEN(config_tcache, bool)
 CTL_RO_CONFIG_GEN(config_tls, bool)
 CTL_RO_CONFIG_GEN(config_utrace, bool)
 CTL_RO_CONFIG_GEN(config_xmalloc, bool)
@@ -1475,8 +1467,8 @@ CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
 CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
 CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
 CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
 CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
 CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@@ -1536,12 +1528,9 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
         }
         /* Set new arena/tcache associations. */
         arena_migrate(tsd, oldind, newind);
-        if (config_tcache) {
-            tcache_t *tcache = tsd_tcachep_get(tsd);
-            if (tcache != NULL) {
-                tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
-                    newarena);
-            }
+        if (tcache_available(tsd)) {
+            tcache_arena_reassociate(tsd_tsdn(tsd),
+                tsd_tcachep_get(tsd), newarena);
         }
     }

@@ -1565,10 +1554,6 @@ thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
     int ret;
     bool oldval;

-    if (!config_tcache) {
-        return ENOENT;
-    }
-
     oldval = tcache_enabled_get(tsd);
     if (newp != NULL) {
         if (newlen != sizeof(bool)) {
@@ -1589,8 +1574,9 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
     void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
     int ret;

-    if (!config_tcache) {
-        return ENOENT;
+    if (!tcache_available(tsd)) {
+        ret = EFAULT;
+        goto label_return;
     }

     READONLY();
@@ -1670,10 +1656,6 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     int ret;
     unsigned tcache_ind;

-    if (!config_tcache) {
-        return ENOENT;
-    }
-
     READONLY();
     if (tcaches_create(tsd, &tcache_ind)) {
         ret = EFAULT;
@@ -1692,10 +1674,6 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     int ret;
     unsigned tcache_ind;

-    if (!config_tcache) {
-        return ENOENT;
-    }
-
     WRITEONLY();
     tcache_ind = UINT_MAX;
     WRITE(tcache_ind, unsigned);
@@ -1716,10 +1694,6 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
     int ret;
     unsigned tcache_ind;

-    if (!config_tcache) {
-        return ENOENT;
-    }
-
     WRITEONLY();
     tcache_ind = UINT_MAX;
     WRITE(tcache_ind, unsigned);
@@ -2150,9 +2124,9 @@ arenas_muzzy_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,

 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
 CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
-CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
 CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
-CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
 CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
@@ -2380,7 +2354,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_base,
 CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
     atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
     size_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_tcache_bytes,
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
     atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
     ATOMIC_RELAXED), size_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
@@ -2480,9 +2454,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
         MUTEX_PROF_RESET(arena->extents_retained.mtx);
         MUTEX_PROF_RESET(arena->decay_dirty.mtx);
         MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
-        if (config_tcache) {
-            MUTEX_PROF_RESET(arena->tcache_ql_mtx);
-        }
+        MUTEX_PROF_RESET(arena->tcache_ql_mtx);
         MUTEX_PROF_RESET(arena->base->mtx);

         for (szind_t i = 0; i < NBINS; i++) {
@@ -2502,9 +2474,9 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
     arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
     arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
     arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
     arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
 CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
     arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
@@ -682,7 +682,7 @@ arenas_tdata_cleanup(tsd_t *tsd) {

 static void
 stats_print_atexit(void) {
-    if (config_tcache && config_stats) {
+    if (config_stats) {
         tsdn_t *tsdn;
         unsigned narenas, i;

@@ -1106,12 +1106,9 @@ malloc_conf_init(void) {
             if (config_xmalloc) {
                 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
             }
-            if (config_tcache) {
-                CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
-                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
-                    "lg_tcache_max", -1,
-                    (sizeof(size_t) << 3) - 1)
-            }
+            CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
+            CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
+                -1, (sizeof(size_t) << 3) - 1)
             if (strncmp("percpu_arena", k, klen) == 0) {
                 int i;
                 bool match = false;
@@ -1236,7 +1233,7 @@ malloc_init_hard_a0_locked() {
         prof_boot1();
     }
     arena_boot();
-    if (config_tcache && tcache_boot(TSDN_NULL)) {
+    if (tcache_boot(TSDN_NULL)) {
         return true;
     }
     if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) {
src/stats.c  (114 lines changed)
@@ -128,20 +128,11 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
     } else {
         char *mutex_counters = " n_lock_ops n_waiting"
             " n_spin_acq max_wait_ns\n";
-        if (config_tcache) {
-            malloc_cprintf(write_cb, cbopaque,
-                "bins: size ind allocated nmalloc"
-                " ndalloc nrequests curregs"
-                " curslabs regs pgs util nfills"
-                " nflushes newslabs reslabs%s",
-                mutex ? mutex_counters : "\n");
-        } else {
-            malloc_cprintf(write_cb, cbopaque,
-                "bins: size ind allocated nmalloc"
-                " ndalloc nrequests curregs"
-                " curslabs regs pgs util newslabs"
-                " reslabs%s", mutex ? mutex_counters : "\n");
-        }
+        malloc_cprintf(write_cb, cbopaque,
+            "bins: size ind allocated nmalloc"
+            " ndalloc nrequests curregs curslabs regs"
+            " pgs util nfills nflushes newslabs"
+            " reslabs%s", mutex ? mutex_counters : "\n");
     }
     for (j = 0, in_gap = false; j < nbins; j++) {
         uint64_t nslabs;
@@ -173,12 +164,10 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
             size_t);
         CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
             &nrequests, uint64_t);
-        if (config_tcache) {
-            CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
-                &nfills, uint64_t);
-            CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
-                &nflushes, uint64_t);
-        }
+        CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+            uint64_t);
+        CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
+            uint64_t);
         CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
             uint64_t);
         CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
@@ -190,23 +179,13 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
                 "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
                 "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
                 "\t\t\t\t\t\t\"curregs\": %zu,\n"
-                "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
-                nmalloc,
-                ndalloc,
-                curregs,
-                nrequests);
-            if (config_tcache) {
-                malloc_cprintf(write_cb, cbopaque,
-                    "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
-                    "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
-                    nfills,
-                    nflushes);
-            }
-            malloc_cprintf(write_cb, cbopaque,
+                "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n"
+                "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
+                "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n"
                 "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n"
                 "\t\t\t\t\t\t\"curslabs\": %zu%s\n",
-                nreslabs, curslabs, mutex ? "," : "");
+                nmalloc, ndalloc, curregs, nrequests, nfills,
+                nflushes, nreslabs, curslabs, mutex ? "," : "");
             if (mutex) {
                 uint64_t mutex_stats[num_mutex_prof_counters];
                 read_arena_bin_mutex_stats(i, j, mutex_stats);
@@ -260,27 +239,13 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
                 }
             }

-            if (config_tcache) {
-                malloc_cprintf(write_cb, cbopaque,
-                    "%20zu %3u %12zu %12"FMTu64
-                    " %12"FMTu64" %12"FMTu64" %12zu"
-                    " %12zu %4u %3zu %-5s %12"FMTu64
-                    " %12"FMTu64" %12"FMTu64" %12"FMTu64,
-                    reg_size, j, curregs * reg_size, nmalloc,
-                    ndalloc, nrequests, curregs, curslabs,
-                    nregs, slab_size / page, util, nfills,
-                    nflushes, nslabs, nreslabs);
-            } else {
-                malloc_cprintf(write_cb, cbopaque,
-                    "%20zu %3u %12zu %12"FMTu64
-                    " %12"FMTu64" %12"FMTu64" %12zu"
-                    " %12zu %4u %3zu %-5s %12"FMTu64
-                    " %12"FMTu64,
-                    reg_size, j, curregs * reg_size, nmalloc,
-                    ndalloc, nrequests, curregs, curslabs,
-                    nregs, slab_size / page, util, nslabs,
-                    nreslabs);
-            }
+            malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"
+                FMTu64" %12"FMTu64" %12"FMTu64" %12zu %12zu %4u"
+                " %3zu %-5s %12"FMTu64" %12"FMTu64" %12"FMTu64
+                " %12"FMTu64, reg_size, j, curregs * reg_size,
+                nmalloc, ndalloc, nrequests, curregs, curslabs,
+                nregs, slab_size / page, util, nfills, nflushes,
+                nslabs, nreslabs);
             if (mutex) {
                 malloc_cprintf(write_cb, cbopaque,
                     " %12"FMTu64" %12"FMTu64" %12"FMTu64
@@ -423,14 +388,7 @@ stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
         malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n");
         arena_prof_mutex_ind_t i, last_mutex;
         last_mutex = num_arena_prof_mutexes - 1;
-        if (!config_tcache) {
-            last_mutex--;
-        }
         for (i = 0; i < num_arena_prof_mutexes; i++) {
-            if (!config_tcache &&
-                i == arena_prof_mutex_tcache_list) {
-                continue;
-            }
             mutex_stats_output_json(write_cb, cbopaque,
                 arena_mutex_names[i], mutex_stats[i],
                 "\t\t\t\t\t", (i == last_mutex));
@@ -440,10 +398,6 @@ stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
     } else {
         arena_prof_mutex_ind_t i;
         for (i = 0; i < num_arena_prof_mutexes; i++) {
-            if (!config_tcache &&
-                i == arena_prof_mutex_tcache_list) {
-                continue;
-            }
             mutex_stats_output(write_cb, cbopaque,
                 arena_mutex_names[i], mutex_stats[i], i == 0);
         }
@@ -659,16 +613,13 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
             "internal: %12zu\n", internal);
     }

-    if (config_tcache) {
-        CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes,
-            size_t);
-        if (json) {
-            malloc_cprintf(write_cb, cbopaque,
-                "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
-        } else {
-            malloc_cprintf(write_cb, cbopaque,
-                "tcache: %12zu\n", tcache_bytes);
-        }
+    CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes, size_t);
+    if (json) {
+        malloc_cprintf(write_cb, cbopaque,
+            "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
+    } else {
+        malloc_cprintf(write_cb, cbopaque,
+            "tcache: %12zu\n", tcache_bytes);
     }

     CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
@@ -761,7 +712,6 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
     CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
     CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
     CONFIG_WRITE_BOOL_JSON(stats, ",")
-    CONFIG_WRITE_BOOL_JSON(tcache, ",")
     CONFIG_WRITE_BOOL_JSON(tls, ",")
     CONFIG_WRITE_BOOL_JSON(utrace, ",")
     CONFIG_WRITE_BOOL_JSON(xmalloc, "")
@@ -959,11 +909,9 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
         malloc_cprintf(write_cb, cbopaque,
             "\t\t\t\"nbins\": %u,\n", nbins);

-        if (config_tcache) {
-            CTL_GET("arenas.nhbins", &uv, unsigned);
-            malloc_cprintf(write_cb, cbopaque,
-                "\t\t\t\"nhbins\": %u,\n", uv);
-        }
+        CTL_GET("arenas.nhbins", &uv, unsigned);
+        malloc_cprintf(write_cb, cbopaque, "\t\t\t\"nhbins\": %u,\n",
+            uv);

         malloc_cprintf(write_cb, cbopaque,
             "\t\t\t\"bin\": [\n");
44
src/tcache.c
44
src/tcache.c
@ -7,13 +7,7 @@
|
|||||||
/******************************************************************************/
|
/******************************************************************************/
|
||||||
/* Data. */
|
/* Data. */
|
||||||
|
|
||||||
bool opt_tcache =
|
bool opt_tcache = true;
|
||||||
#ifdef JEMALLOC_TCACHE
|
|
||||||
true
|
|
||||||
#else
|
|
||||||
false
|
|
||||||
#endif
|
|
||||||
;
|
|
||||||
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
|
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
|
||||||
|
|
||||||
tcache_bin_info_t *tcache_bin_info;
|
tcache_bin_info_t *tcache_bin_info;
|
||||||
@ -93,7 +87,7 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
|
|||||||
tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
|
tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
assert(tcache->arena);
|
assert(tcache->arena != NULL);
|
||||||
arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
|
arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
|
||||||
config_prof ? tcache->prof_accumbytes : 0);
|
config_prof ? tcache->prof_accumbytes : 0);
|
||||||
if (config_prof) {
|
if (config_prof) {
|
||||||
@ -304,7 +298,7 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
|
|||||||
static void
|
static void
|
||||||
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
|
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
|
||||||
arena_t *arena = tcache->arena;
|
arena_t *arena = tcache->arena;
|
||||||
assert(arena);
|
assert(arena != NULL);
|
||||||
if (config_stats) {
|
if (config_stats) {
|
||||||
/* Unlink from list of extant tcaches. */
|
/* Unlink from list of extant tcaches. */
|
||||||
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
|
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
|
||||||
@@ -383,10 +377,6 @@ tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
 /* Initialize auto tcache (embedded in TSD). */
 bool
 tsd_tcache_data_init(tsd_t *tsd) {
-    if (!config_tcache) {
-        return false;
-    }
-
     tcache_t *tcache = &tsd->tcache;
     assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
     size_t size = stack_nelms * sizeof(void *);
@@ -458,9 +448,9 @@ tcache_create_explicit(tsd_t *tsd) {

 static void
 tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
-    unsigned i;
+    assert(tcache->arena != NULL);

-    for (i = 0; i < NBINS; i++) {
+    for (unsigned i = 0; i < NBINS; i++) {
         tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
         tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

@@ -468,7 +458,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
             assert(tbin->tstats.nrequests == 0);
         }
     }
-    for (; i < nhbins; i++) {
+    for (unsigned i = NBINS; i < nhbins; i++) {
         tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
         tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

@@ -477,20 +467,17 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
         }
     }

-    arena_t *arena = tcache->arena;
-    if (config_prof && arena && tcache->prof_accumbytes > 0 &&
-        arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes)) {
+    if (config_prof && tcache->prof_accumbytes > 0 &&
+        arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
+        tcache->prof_accumbytes)) {
         prof_idump(tsd_tsdn(tsd));
     }
 }

 void
 tcache_flush(void) {
-    tsd_t *tsd;
-
-    cassert(config_tcache);
-
-    tsd = tsd_fetch();
+    tsd_t *tsd = tsd_fetch();
+    assert(tcache_available(tsd));
     tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
 }

@@ -514,10 +501,6 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
 /* For auto tcache (embedded in TSD) only. */
 void
 tcache_cleanup(tsd_t *tsd) {
-    if (!config_tcache) {
-        return;
-    }
-
     tcache_t *tcache = tsd_tcachep_get(tsd);
     if (!tcache_available(tsd)) {
         assert(tsd_tcache_enabled_get(tsd) == false);
@@ -660,10 +643,6 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) {

 bool
 tcache_boot(tsdn_t *tsdn) {
-    cassert(config_tcache);
-
-    unsigned i;
-
     /* If necessary, clamp opt_lg_tcache_max. */
     if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
         SMALL_MAXCLASS) {
@@ -685,6 +664,7 @@ tcache_boot(tsdn_t *tsdn) {
         return true;
     }
     stack_nelms = 0;
+    unsigned i;
     for (i = 0; i < NBINS; i++) {
         if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
             tcache_bin_info[i].ncached_max =
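Note (illustrative, not part of the patch): with the #ifdef JEMALLOC_TCACHE initializer gone, opt_tcache is an ordinary runtime default, so whether caching is in effect becomes a runtime question rather than a build-time one. A minimal sketch of how an application might inspect that state through the documented opt.tcache and thread.tcache.enabled mallctls; it assumes an unprefixed build (mallctl exported under that name), and the helper name is invented for the example.

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical helper: report the global default and this thread's state. */
void
report_tcache_state(void) {
    bool opt, enabled;
    size_t sz = sizeof(bool);

    /* Global default, as set via malloc_conf / MALLOC_CONF. */
    if (mallctl("opt.tcache", (void *)&opt, &sz, NULL, 0) == 0) {
        printf("opt.tcache: %s\n", opt ? "true" : "false");
    }

    /*
     * Per-thread state; writing a bool to the same name toggles caching
     * for the calling thread only.
     */
    sz = sizeof(bool);
    if (mallctl("thread.tcache.enabled", (void *)&enabled, &sz, NULL,
        0) == 0) {
        printf("thread.tcache.enabled: %s\n",
            enabled ? "true" : "false");
    }
}

The thread.tcache.enabled read/write round trip is exactly what the test changed in the next hunks exercises.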
@@ -1,29 +1,11 @@
 #include "test/jemalloc_test.h"

-static const bool config_tcache =
-#ifdef JEMALLOC_TCACHE
-    true
-#else
-    false
-#endif
-    ;
-
 void *
 thd_start(void *arg) {
-    int err;
-    size_t sz;
     bool e0, e1;
-
-    sz = sizeof(bool);
-    if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
-        0))) {
-        if (err == ENOENT) {
-            assert_false(config_tcache,
-                "ENOENT should only be returned if tcache is "
-                "disabled");
-        }
-        goto label_ENOENT;
-    }
+    size_t sz = sizeof(bool);
+    assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL,
+        0), 0, "Unexpected mallctl failure");

     if (e0) {
         e1 = false;

@@ -78,7 +60,6 @@ thd_start(void *arg) {

     free(malloc(1));
     return NULL;
-label_ENOENT:
     test_skip("\"thread.tcache.enabled\" mallctl not available");
     return NULL;
 }
@@ -43,7 +43,6 @@ for t in $@; do
     # per test shell script to ignore the @JEMALLOC_CPREFIX@ detail).
     $(enable_fill=@enable_fill@ \
       enable_prof=@enable_prof@ \
-      enable_tcache=@enable_tcache@ \
       . @srcroot@${t}.sh && \
       export_malloc_conf && \
       ${t}@exe@ @abs_srcroot@ @abs_objroot@)
@@ -174,16 +174,15 @@ TEST_BEGIN(test_decay_ticks) {
     assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
         0), 0, "Unexpected mallctl failure");

-    int err;
     /* Set up a manually managed arena for test. */
     arena_ind = do_arena_create(0, 0);

     /* Migrate to the new arena, and get the ticker. */
     unsigned old_arena_ind;
     size_t sz_arena_ind = sizeof(old_arena_ind);
-    err = mallctl("thread.arena", (void *)&old_arena_ind, &sz_arena_ind,
-        (void *)&arena_ind, sizeof(arena_ind));
-    assert_d_eq(err, 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
+        &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
+        "Unexpected mallctl() failure");
     decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
     assert_ptr_not_null(decay_ticker,
         "Unexpected failure getting decay ticker");
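Note (illustrative): the rewritten assertion above folds the thread.arena write into assert_d_eq, but the underlying pattern (create an arena with arenas.create, then rebind the calling thread to it via thread.arena) is useful outside the test harness too. A standalone sketch under the same assumptions (unprefixed build; the helper name is invented for the example):

#include <stddef.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical helper: returns 0 on success, -1 on mallctl failure. */
int
migrate_to_new_arena(unsigned *arena_ind) {
    size_t sz = sizeof(*arena_ind);

    /* Create a fresh arena and get its index back. */
    if (mallctl("arenas.create", (void *)arena_ind, &sz, NULL, 0) != 0) {
        return -1;
    }
    /* Rebind the calling thread; its allocations now come from that arena. */
    if (mallctl("thread.arena", NULL, NULL, (void *)arena_ind,
        sizeof(*arena_ind)) != 0) {
        return -1;
    }
    return 0;
}

After the rebind, the test can watch the new arena's decay ticker advance as the thread allocates, which is what the following hunks check.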
@@ -310,51 +309,48 @@ TEST_BEGIN(test_decay_ticks) {
     * Test tcache fill/flush interactions for large and small size classes,
     * using an explicit tcache.
     */
-    if (config_tcache) {
     unsigned tcache_ind, i;
     size_t tcache_sizes[2];
     tcache_sizes[0] = large0;
     tcache_sizes[1] = 1;

     size_t tcache_max, sz_tcache_max;
     sz_tcache_max = sizeof(tcache_max);
-    err = mallctl("arenas.tcache_max", (void *)&tcache_max,
-        &sz_tcache_max, NULL, 0);
-    assert_d_eq(err, 0, "Unexpected mallctl() failure");
+    assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
+        &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

     sz = sizeof(unsigned);
     assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
         NULL, 0), 0, "Unexpected mallctl failure");

     for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
         sz = tcache_sizes[i];

         /* tcache fill. */
         tick0 = ticker_read(decay_ticker);
         p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
         assert_ptr_not_null(p, "Unexpected mallocx() failure");
         tick1 = ticker_read(decay_ticker);
         assert_u32_ne(tick1, tick0,
             "Expected ticker to tick during tcache fill "
             "(sz=%zu)", sz);
         /* tcache flush. */
         dallocx(p, MALLOCX_TCACHE(tcache_ind));
         tick0 = ticker_read(decay_ticker);
         assert_d_eq(mallctl("tcache.flush", NULL, NULL,
             (void *)&tcache_ind, sizeof(unsigned)), 0,
             "Unexpected mallctl failure");
         tick1 = ticker_read(decay_ticker);

         /* Will only tick if it's in tcache. */
         if (sz <= tcache_max) {
             assert_u32_ne(tick1, tick0,
                 "Expected ticker to tick during tcache "
                 "flush (sz=%zu)", sz);
         } else {
             assert_u32_eq(tick1, tick0,
                 "Unexpected ticker tick during tcache "
                 "flush (sz=%zu)", sz);
         }
     }
-    }
 }
@@ -422,18 +418,11 @@ TEST_BEGIN(test_decay_ticker) {
     * the ticker triggers purging.
     */

-    if (config_tcache) {
-        size_t tcache_max;
-
-        size_t sz = sizeof(size_t);
-        assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
-            &sz, NULL, 0), 0, "Unexpected mallctl failure");
-        large = nallocx(tcache_max + 1, flags);
-    } else {
-        size_t sz = sizeof(size_t);
-        assert_d_eq(mallctl("arenas.lextent.0.size", &large, &sz, NULL,
-            0), 0, "Unexpected mallctl failure");
-    }
+    size_t tcache_max;
+    size_t sz = sizeof(size_t);
+    assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
+        0), 0, "Unexpected mallctl failure");
+    large = nallocx(tcache_max + 1, flags);

     do_purge(arena_ind);
     uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
@@ -1,6 +1,3 @@
 #!/bin/sh

-export MALLOC_CONF="dirty_decay_time:1,muzzy_decay_time:1"
-if [ "x${enable_tcache}" = "x1" ] ; then
-  export MALLOC_CONF="${MALLOC_CONF},lg_tcache_max:0"
-fi
+export MALLOC_CONF="dirty_decay_time:1,muzzy_decay_time:1,lg_tcache_max:0"
@@ -136,7 +136,6 @@ TEST_BEGIN(test_mallctl_config) {
     TEST_MALLCTL_CONFIG(prof_libgcc, bool);
     TEST_MALLCTL_CONFIG(prof_libunwind, bool);
     TEST_MALLCTL_CONFIG(stats, bool);
-    TEST_MALLCTL_CONFIG(tcache, bool);
     TEST_MALLCTL_CONFIG(tls, bool);
     TEST_MALLCTL_CONFIG(utrace, bool);
     TEST_MALLCTL_CONFIG(xmalloc, bool);

@@ -170,8 +169,8 @@ TEST_BEGIN(test_mallctl_opt) {
     TEST_MALLCTL_OPT(bool, zero, fill);
     TEST_MALLCTL_OPT(bool, utrace, utrace);
     TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
-    TEST_MALLCTL_OPT(bool, tcache, tcache);
-    TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
+    TEST_MALLCTL_OPT(bool, tcache, always);
+    TEST_MALLCTL_OPT(size_t, lg_tcache_max, always);
     TEST_MALLCTL_OPT(bool, prof, prof);
     TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
     TEST_MALLCTL_OPT(bool, prof_active, prof);
@ -213,8 +212,6 @@ TEST_END
|
|||||||
TEST_BEGIN(test_tcache_none) {
|
TEST_BEGIN(test_tcache_none) {
|
||||||
void *p0, *q, *p1;
|
void *p0, *q, *p1;
|
||||||
|
|
||||||
test_skip_if(!config_tcache);
|
|
||||||
|
|
||||||
/* Allocate p and q. */
|
/* Allocate p and q. */
|
||||||
p0 = mallocx(42, 0);
|
p0 = mallocx(42, 0);
|
||||||
assert_ptr_not_null(p0, "Unexpected mallocx() failure");
|
assert_ptr_not_null(p0, "Unexpected mallocx() failure");
|
||||||
@ -243,8 +240,6 @@ TEST_BEGIN(test_tcache) {
|
|||||||
unsigned i;
|
unsigned i;
|
||||||
size_t sz, psz, qsz;
|
size_t sz, psz, qsz;
|
||||||
|
|
||||||
test_skip_if(!config_tcache);
|
|
||||||
|
|
||||||
psz = 42;
|
psz = 42;
|
||||||
qsz = nallocx(psz, 0) + 1;
|
qsz = nallocx(psz, 0) + 1;
|
||||||
|
|
||||||
@@ -1,12 +1,8 @@
 #!/bin/sh

+export MALLOC_CONF="tcache:false"
 if [ "x${enable_prof}" = "x1" ] ; then
-  export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
-  if [ "x${enable_tcache}" = "x1" ] ; then
-    export MALLOC_CONF="${MALLOC_CONF},tcache:false"
-  fi
-elif [ "x${enable_tcache}" = "x1" ] ; then
-  export MALLOC_CONF="tcache:false"
+  export MALLOC_CONF="${MALLOC_CONF},prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
 fi

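Note (illustrative): the .sh changes above work because the test binaries pick up MALLOC_CONF from the environment. An application can get the same effect without touching the environment by defining the documented malloc_conf global, and --with-malloc-conf (the configure flag this commit switches the CI matrix to) bakes a default option string into the library itself that these runtime sources can still override. A minimal sketch, assuming an unprefixed build:

#include <stdlib.h>
#include <jemalloc/jemalloc.h>

/* Compiled-in equivalent of running with MALLOC_CONF="tcache:false". */
const char *malloc_conf = "tcache:false";

int
main(void) {
    void *p = malloc(64);    /* Served without a thread cache. */

    free(p);
    return 0;
}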
@@ -83,7 +83,7 @@ TEST_BEGIN(test_stats_arenas_summary) {
     dallocx(large, 0);

     assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
-        config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+        opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
     assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
         "Unexpected mallctl() failure");

@@ -150,7 +150,7 @@ TEST_BEGIN(test_stats_arenas_small) {
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

     assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
-        config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+        opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");

     assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
         0, "Unexpected mallctl() failure");

@@ -230,6 +230,10 @@ TEST_BEGIN(test_stats_arenas_bins) {
     uint64_t nslabs, nreslabs;
     int expected = config_stats ? 0 : ENOENT;

+    /* Make sure allocation below isn't satisfied by tcache. */
+    assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
+        opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");
+
     unsigned arena_ind, old_arena_ind;
     sz = sizeof(unsigned);
     assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),

@@ -243,7 +247,7 @@ TEST_BEGIN(test_stats_arenas_bins) {
     assert_ptr_not_null(p, "Unexpected malloc() failure");

     assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
-        config_tcache ? 0 : ENOENT, "Unexpected mallctl() result");
+        opt_tcache ? 0 : EFAULT, "Unexpected mallctl() result");

     assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
         0, "Unexpected mallctl() failure");

@@ -266,11 +270,11 @@ TEST_BEGIN(test_stats_arenas_bins) {

     sz = sizeof(uint64_t);
     gen_mallctl_str(cmd, "nfills", arena_ind);
-    assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0),
-        config_tcache ? expected : ENOENT, "Unexpected mallctl() result");
+    assert_d_eq(mallctl(cmd, (void *)&nfills, &sz, NULL, 0), expected,
+        "Unexpected mallctl() result");
     gen_mallctl_str(cmd, "nflushes", arena_ind);
-    assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0),
-        config_tcache ? expected : ENOENT, "Unexpected mallctl() result");
+    assert_d_eq(mallctl(cmd, (void *)&nflushes, &sz, NULL, 0), expected,
+        "Unexpected mallctl() result");

     gen_mallctl_str(cmd, "nslabs", arena_ind);
     assert_d_eq(mallctl(cmd, (void *)&nslabs, &sz, NULL, 0), expected,

@@ -292,7 +296,7 @@ TEST_BEGIN(test_stats_arenas_bins) {
         "nrequests should be greater than zero");
     assert_zu_gt(curregs, 0,
         "allocated should be greater than zero");
-    if (config_tcache) {
+    if (opt_tcache) {
         assert_u64_gt(nfills, 0,
             "At least one fill should have occurred");
         assert_u64_gt(nflushes, 0,
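Note (illustrative): as the updated assertions encode, thread.tcache.flush now always exists; it returns 0 when a cache was flushed and EFAULT when caching is disabled via tcache:false, rather than the old ENOENT from a --disable-tcache build. A caller that wants to tolerate both outcomes might look like this (hypothetical helper, unprefixed build assumed):

#include <errno.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

/* Hypothetical helper: flush this thread's cache if one is active. */
void
flush_thread_tcache(void) {
    int err = mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);

    if (err == 0) {
        return;        /* Cache flushed. */
    }
    if (err == EFAULT) {
        return;        /* Running with tcache:false; nothing to flush. */
    }
    fprintf(stderr, "thread.tcache.flush: unexpected error %d\n", err);
}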