Merge branch 'rc-4.5.0'

Jason Evans · 2017-02-28 19:09:23 -08:00 · commit 04380e79f1
83 changed files with 2390 additions and 517 deletions

.appveyor.yml

@ -12,6 +12,20 @@ environment:
CPU: x86_64
- MSYSTEM: MINGW32
CPU: i686
- MSYSTEM: MINGW64
CPU: x86_64
MSVC: amd64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW32
CPU: i686
MSVC: x86
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW64
CPU: x86_64
CONFIG_FLAGS: --enable-debug
- MSYSTEM: MINGW32
CPU: i686
CONFIG_FLAGS: --enable-debug
install:
- set PATH=c:\msys64\%MSYSTEM%\bin;c:\msys64\usr\bin;%PATH%
@ -21,7 +35,7 @@ install:
build_script:
- bash -c "autoconf"
- bash -c "./configure"
- bash -c "./configure $CONFIG_FLAGS"
- mingw32-make -j3
- file lib/jemalloc.dll
- mingw32-make -j3 tests

.travis.yml

@ -1,29 +1,95 @@
language: c
language: generic
matrix:
include:
- os: linux
compiler: gcc
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS=""
- os: osx
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS=""
- os: linux
compiler: gcc
env:
- EXTRA_FLAGS=-m32
env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS=""
- os: linux
env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS=""
addons:
apt:
packages:
- gcc-multilib
- gcc-multilib
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache"
- os: osx
compiler: clang
env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS=""
- os: osx
compiler: clang
env:
- EXTRA_FLAGS=-m32
env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS=""
- os: osx
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug"
- os: osx
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats"
- os: osx
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache"
- os: linux
env: CC=clang COMPILER_FLAGS="-m32" CONFIGURE_FLAGS=""
addons:
apt:
packages:
- gcc-multilib
- os: linux
env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug"
- os: linux
env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof"
- os: linux
env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats"
- os: linux
env: CC=clang COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-tcache"
- os: linux
env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-debug"
addons:
apt:
packages:
- gcc-multilib
- os: linux
env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--enable-prof"
addons:
apt:
packages:
- gcc-multilib
- os: linux
env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-stats"
addons:
apt:
packages:
- gcc-multilib
- os: linux
env: CC=gcc COMPILER_FLAGS="-m32" CONFIGURE_FLAGS="--disable-tcache"
addons:
apt:
packages:
- gcc-multilib
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-prof"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-stats"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-tcache"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-stats"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-prof --disable-tcache"
- os: linux
env: CC=gcc COMPILER_FLAGS="" CONFIGURE_FLAGS="--disable-stats --disable-tcache"
before_script:
- autoconf
- ./configure${EXTRA_FLAGS:+ CC="$CC $EXTRA_FLAGS"}
- ./configure ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" } $CONFIGURE_FLAGS
- make -j3
- make -j3 tests
script:
- make check

ChangeLog

@ -4,6 +4,41 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.5.0 (February 28, 2017)
This is the first release to benefit from much broader continuous integration
testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure
in place for prior releases, it would have caught all of the most serious
regressions fixed by this release.
New features:
- Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for
transparent huge page integration. (@jasone)
- Update zone allocator integration to work with macOS 10.12. (@glandium)
- Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and
EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not
during configuration. (@jasone, @ronawho)
Bug fixes:
- Fix DSS (sbrk(2)-based) allocation. This regression was first released in
4.3.0. (@jasone)
- Handle race in per size class utilization computation. This functionality
was first released in 4.0.0. (@interwq)
- Fix lock order reversal during gdump. (@jasone)
- Fix/refactor tcache synchronization. This regression was first released in
4.0.0. (@jasone)
- Fix various JSON-formatted malloc_stats_print() bugs. This functionality
was first released in 4.3.0. (@jasone)
- Fix huge-aligned allocation. This regression was first released in 4.4.0.
(@jasone)
- When transparent huge page integration is enabled, detect what state pages
start in according to the kernel's current operating mode, and only convert
arena chunks to non-huge during purging if that is not their initial state.
This functionality was first released in 4.4.0. (@jasone)
- Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case.
This regression was first released in 4.0.0. (@jasone, @428desmo)
- Properly detect sparc64 when building for Linux. (@glaubitz)
* 4.4.0 (December 3, 2016)
New features:

INSTALL

@ -157,6 +157,13 @@ any of the following arguments (not a definitive list) to 'configure':
released in bulk, thus reducing the total number of mutex operations. See
the "opt.tcache" option for usage details.
--disable-thp
Disable transparent huge page (THP) integration. On systems with THP
support, THPs are explicitly disabled as a side effect of unused dirty page
purging for chunks that back small and/or large allocations, because such
chunks typically comprise active, unused dirty, and untouched clean
pages.
--disable-munmap
Disable virtual memory deallocation via munmap(2); instead keep track of
the virtual memory for later use. munmap() is disabled by default (i.e.
@ -306,17 +313,16 @@ The following environment variables (not a definitive list) impact configure's
behavior:
CFLAGS="?"
Pass these flags to the compiler. You probably shouldn't define this unless
you know what you are doing. (Use EXTRA_CFLAGS instead.)
Pass these flags to the C compiler. Any flags set by the configure script
are prepended, which means explicitly set flags generally take precedence.
Take care when specifying flags such as -Werror, because configure tests may
be affected in undesirable ways.
EXTRA_CFLAGS="?"
Append these flags to CFLAGS. This makes it possible to add flags such as
-Werror, while allowing the configure script to determine what other flags
are appropriate for the specified configuration.
The configure script specifically checks whether an optimization flag (-O*)
is specified in EXTRA_CFLAGS, and refrains from specifying an optimization
level if it finds that one has already been specified.
Append these flags to CFLAGS, without passing them to the compiler during
configuration. This makes it possible to add flags such as -Werror, while
allowing the configure script to determine what other flags are appropriate
for the specified configuration.
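For example, a presumable invocation is EXTRA_CFLAGS=-Werror ./configure, which
makes the build proper treat warnings as errors while leaving configure's own
compile probes unaffected.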
CPPFLAGS="?"
Pass these flags to the C preprocessor. Note that CFLAGS is not passed to

Makefile.in

@ -24,8 +24,10 @@ abs_objroot := @abs_objroot@
# Build parameters.
CPPFLAGS := @CPPFLAGS@ -I$(srcroot)include -I$(objroot)include
CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
EXTRA_CFLAGS := @EXTRA_CFLAGS@
CFLAGS := @CFLAGS@ $(EXTRA_CFLAGS)
CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS))
LDFLAGS := @LDFLAGS@
EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
LIBS := @LIBS@
@ -156,6 +158,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/bitmap.c \
$(srcroot)test/unit/ckh.c \
$(srcroot)test/unit/decay.c \
$(srcroot)test/unit/extent_quantize.c \
$(srcroot)test/unit/fork.c \
$(srcroot)test/unit/hash.c \
$(srcroot)test/unit/junk.c \
@ -186,6 +189,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/size_classes.c \
$(srcroot)test/unit/smoothstep.c \
$(srcroot)test/unit/stats.c \
$(srcroot)test/unit/stats_print.c \
$(srcroot)test/unit/ticker.c \
$(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/tsd.c \

configure.ac

@ -6,29 +6,66 @@ AC_CONFIG_AUX_DIR([build-aux])
dnl ============================================================================
dnl Custom macro definitions.
dnl JE_CFLAGS_APPEND(cflag)
AC_DEFUN([JE_CFLAGS_APPEND],
dnl JE_CONCAT_VVV(r, a, b)
dnl
dnl Set $r to the concatenation of $a and $b, with a space separating them iff
dnl both $a and $b are non-empty.
AC_DEFUN([JE_CONCAT_VVV],
if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
$1="[$]{$2}[$]{$3}"
else
$1="[$]{$2} [$]{$3}"
fi
)
dnl JE_APPEND_VS(a, b)
dnl
dnl Set $a to the concatenation of $a and b, with a space separating them iff
dnl both $a and b are non-empty.
AC_DEFUN([JE_APPEND_VS],
T_APPEND_V=$2
JE_CONCAT_VVV($1, $1, T_APPEND_V)
)
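Both helpers implement one joining rule: concatenate with a single separating
space iff both operands are non-empty, so an empty CONFIGURE_CFLAGS or
SPECIFIED_CFLAGS never leaves a stray space in CFLAGS. A minimal C sketch of
that rule (illustrative helper, not jemalloc code):

#include <stdio.h>

/* Join a and b with one space iff both are non-empty; otherwise plain
 * concatenation, so no leading or trailing space can appear. */
static void
concat_vvv(char *r, size_t rlen, const char *a, const char *b)
{
	snprintf(r, rlen, (a[0] != '\0' && b[0] != '\0') ? "%s %s" : "%s%s",
	    a, b);
}

int
main(void)
{
	char cflags[128];

	concat_vvv(cflags, sizeof(cflags), "-std=gnu11 -Wall", "-O2");
	printf("[%s]\n", cflags);	/* [-std=gnu11 -Wall -O2] */
	concat_vvv(cflags, sizeof(cflags), "", "-O2");
	printf("[%s]\n", cflags);	/* [-O2], no stray leading space */
	return (0);
}

CFLAGS is then always regenerated as the concatenation of CONFIGURE_CFLAGS and
SPECIFIED_CFLAGS, keeping user-specified flags last so they take precedence.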
CONFIGURE_CFLAGS=
SPECIFIED_CFLAGS="${CFLAGS}"
dnl JE_CFLAGS_ADD(cflag)
dnl
dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS
dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests). This macro
dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
AC_DEFUN([JE_CFLAGS_ADD],
[
AC_MSG_CHECKING([whether compiler supports $1])
TCFLAGS="${CFLAGS}"
if test "x${CFLAGS}" = "x" ; then
CFLAGS="$1"
else
CFLAGS="${CFLAGS} $1"
fi
T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
JE_APPEND_VS(CONFIGURE_CFLAGS, $1)
JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
[[
]], [[
return 0;
]])],
[je_cv_cflags_appended=$1]
[je_cv_cflags_added=$1]
AC_MSG_RESULT([yes]),
[je_cv_cflags_appended=]
[je_cv_cflags_added=]
AC_MSG_RESULT([no])
[CFLAGS="${TCFLAGS}"]
[CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"]
)
JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
])
dnl JE_CFLAGS_SAVE()
dnl JE_CFLAGS_RESTORE()
dnl
dnl Save/restore CFLAGS. Nesting is not supported.
AC_DEFUN([JE_CFLAGS_SAVE],
SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
)
AC_DEFUN([JE_CFLAGS_RESTORE],
CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
)
dnl JE_COMPILABLE(label, hcode, mcode, rvar)
dnl
dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
@ -168,46 +205,45 @@ if test "x${je_cv_cray}" = "xyes" ; then
[je_cv_cray_84=no])])
fi
if test "x$CFLAGS" = "x" ; then
no_CFLAGS="yes"
if test "x$GCC" = "xyes" ; then
JE_CFLAGS_APPEND([-std=gnu11])
if test "x$je_cv_cflags_appended" = "x-std=gnu11" ; then
if test "x$GCC" = "xyes" ; then
JE_CFLAGS_ADD([-std=gnu11])
if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
else
JE_CFLAGS_ADD([-std=gnu99])
if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
else
JE_CFLAGS_APPEND([-std=gnu99])
if test "x$je_cv_cflags_appended" = "x-std=gnu99" ; then
AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
fi
fi
JE_CFLAGS_APPEND([-Wall])
JE_CFLAGS_APPEND([-Werror=declaration-after-statement])
JE_CFLAGS_APPEND([-Wshorten-64-to-32])
JE_CFLAGS_APPEND([-Wsign-compare])
JE_CFLAGS_APPEND([-pipe])
JE_CFLAGS_APPEND([-g3])
elif test "x$je_cv_msvc" = "xyes" ; then
CC="$CC -nologo"
JE_CFLAGS_APPEND([-Zi])
JE_CFLAGS_APPEND([-MT])
JE_CFLAGS_APPEND([-W3])
JE_CFLAGS_APPEND([-FS])
CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat"
fi
if test "x$je_cv_cray" = "xyes" ; then
dnl cray compiler 8.4 has an inlining bug
if test "x$je_cv_cray_84" = "xyes" ; then
JE_CFLAGS_APPEND([-hipa2])
JE_CFLAGS_APPEND([-hnognu])
fi
if test "x$enable_cc_silence" != "xno" ; then
dnl ignore unreachable code warning
JE_CFLAGS_APPEND([-hnomessage=128])
dnl ignore redefinition of "malloc", "free", etc warning
JE_CFLAGS_APPEND([-hnomessage=1357])
fi
JE_CFLAGS_ADD([-Wall])
JE_CFLAGS_ADD([-Werror=declaration-after-statement])
JE_CFLAGS_ADD([-Wshorten-64-to-32])
JE_CFLAGS_ADD([-Wsign-compare])
JE_CFLAGS_ADD([-pipe])
JE_CFLAGS_ADD([-g3])
elif test "x$je_cv_msvc" = "xyes" ; then
CC="$CC -nologo"
JE_CFLAGS_ADD([-Zi])
JE_CFLAGS_ADD([-MT])
JE_CFLAGS_ADD([-W3])
JE_CFLAGS_ADD([-FS])
JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat)
fi
if test "x$je_cv_cray" = "xyes" ; then
dnl cray compiler 8.4 has an inlining bug
if test "x$je_cv_cray_84" = "xyes" ; then
JE_CFLAGS_ADD([-hipa2])
JE_CFLAGS_ADD([-hnognu])
fi
if test "x$enable_cc_silence" != "xno" ; then
dnl ignore unreachable code warning
JE_CFLAGS_ADD([-hnomessage=128])
dnl ignore redefinition of "malloc", "free", etc warning
JE_CFLAGS_ADD([-hnomessage=1357])
fi
fi
AC_SUBST([CONFIGURE_CFLAGS])
AC_SUBST([SPECIFIED_CFLAGS])
AC_SUBST([EXTRA_CFLAGS])
AC_PROG_CPP
@ -217,7 +253,7 @@ if test "x${ac_cv_big_endian}" = "x1" ; then
fi
if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
CPPFLAGS="$CPPFLAGS -I${srcdir}/include/msvc_compat/C99"
JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99)
fi
if test "x${je_cv_msvc}" = "xyes" ; then
@ -348,7 +384,6 @@ dnl
dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
CFLAGS="$CFLAGS"
default_munmap="1"
maps_coalesce="1"
case "${host}" in
@ -380,7 +415,7 @@ case "${host}" in
;;
*-*-linux-android)
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
abi="elf"
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
@ -391,7 +426,7 @@ case "${host}" in
;;
*-*-linux* | *-*-kfreebsd*)
dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
abi="elf"
AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
@ -416,8 +451,8 @@ case "${host}" in
abi="elf"
RPATH='-Wl,-R,$(1)'
dnl Solaris needs this for sigwait().
CPPFLAGS="$CPPFLAGS -D_POSIX_PTHREAD_SEMANTICS"
LIBS="$LIBS -lposix4 -lsocket -lnsl"
JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS)
JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
;;
*-ibm-aix*)
if "$LG_SIZEOF_PTR" = "8"; then
@ -515,19 +550,19 @@ JE_COMPILABLE([__attribute__ syntax],
if test "x${je_cv_attribute}" = "xyes" ; then
AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
JE_CFLAGS_APPEND([-fvisibility=hidden])
JE_CFLAGS_ADD([-fvisibility=hidden])
fi
fi
dnl Check for tls_model attribute support (clang 3.0 still lacks support).
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
JE_CFLAGS_APPEND([-herror_on_warning])
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([tls_model attribute], [],
[static __thread int
__attribute__((tls_model("initial-exec"), unused)) foo;
foo = 0;],
[je_cv_tls_model])
CFLAGS="${SAVED_CFLAGS}"
JE_CFLAGS_RESTORE()
if test "x${je_cv_tls_model}" = "xyes" ; then
AC_DEFINE([JEMALLOC_TLS_MODEL],
[__attribute__((tls_model("initial-exec")))])
@ -535,35 +570,35 @@ else
AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
fi
dnl Check for alloc_size attribute support.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
JE_CFLAGS_APPEND([-herror_on_warning])
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
[void *foo(size_t size) __attribute__((alloc_size(1)));],
[je_cv_alloc_size])
CFLAGS="${SAVED_CFLAGS}"
JE_CFLAGS_RESTORE()
if test "x${je_cv_alloc_size}" = "xyes" ; then
AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
fi
dnl Check for format(gnu_printf, ...) attribute support.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
JE_CFLAGS_APPEND([-herror_on_warning])
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
[void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
[je_cv_format_gnu_printf])
CFLAGS="${SAVED_CFLAGS}"
JE_CFLAGS_RESTORE()
if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
fi
dnl Check for format(printf, ...) attribute support.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
JE_CFLAGS_APPEND([-herror_on_warning])
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_CFLAGS_ADD([-herror_on_warning])
JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
[void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
[je_cv_format_printf])
CFLAGS="${SAVED_CFLAGS}"
JE_CFLAGS_RESTORE()
if test "x${je_cv_format_printf}" = "xyes" ; then
AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
fi
@ -625,9 +660,9 @@ if test "x$enable_code_coverage" = "x1" ; then
deoptimize="no"
echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || deoptimize="yes"
if test "x${deoptimize}" = "xyes" ; then
JE_CFLAGS_APPEND([-O0])
JE_CFLAGS_ADD([-O0])
fi
JE_CFLAGS_APPEND([-fprofile-arcs -ftest-coverage])
JE_CFLAGS_ADD([-fprofile-arcs -ftest-coverage])
EXTRA_LDFLAGS="$EXTRA_LDFLAGS -fprofile-arcs -ftest-coverage"
AC_DEFINE([JEMALLOC_CODE_COVERAGE], [ ])
fi
@ -817,19 +852,14 @@ if test "x$enable_ivsalloc" = "x1" ; then
fi
dnl Only optimize if not debugging.
if test "x$enable_debug" = "x0" -a "x$no_CFLAGS" = "xyes" ; then
dnl Make sure that an optimization flag was not specified in EXTRA_CFLAGS.
optimize="no"
echo "$CFLAGS $EXTRA_CFLAGS" | grep '\-O' >/dev/null || optimize="yes"
if test "x${optimize}" = "xyes" ; then
if test "x$GCC" = "xyes" ; then
JE_CFLAGS_APPEND([-O3])
JE_CFLAGS_APPEND([-funroll-loops])
elif test "x$je_cv_msvc" = "xyes" ; then
JE_CFLAGS_APPEND([-O2])
else
JE_CFLAGS_APPEND([-O])
fi
if test "x$enable_debug" = "x0" ; then
if test "x$GCC" = "xyes" ; then
JE_CFLAGS_ADD([-O3])
JE_CFLAGS_ADD([-funroll-loops])
elif test "x$je_cv_msvc" = "xyes" ; then
JE_CFLAGS_ADD([-O2])
else
JE_CFLAGS_ADD([-O])
fi
fi
@ -893,10 +923,10 @@ fi,
if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
if test "x$LUNWIND" = "x-lunwind" ; then
AC_CHECK_LIB([unwind], [unw_backtrace], [LIBS="$LIBS $LUNWIND"],
AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)],
[enable_prof_libunwind="0"])
else
LIBS="$LIBS $LUNWIND"
JE_APPEND_VS(LIBS, $LUNWIND)
fi
if test "x${enable_prof_libunwind}" = "x1" ; then
backtrace_method="libunwind"
@ -918,7 +948,7 @@ fi
if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
-a "x$GCC" = "xyes" ; then
AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [LIBS="$LIBS -lgcc"], [enable_prof_libgcc="0"])
AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"])
if test "x${enable_prof_libgcc}" = "x1" ; then
backtrace_method="libgcc"
AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
@ -940,7 +970,7 @@ fi
)
if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
-a "x$GCC" = "xyes" ; then
JE_CFLAGS_APPEND([-fno-omit-frame-pointer])
JE_CFLAGS_ADD([-fno-omit-frame-pointer])
backtrace_method="gcc intrinsics"
AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
else
@ -955,9 +985,7 @@ AC_MSG_CHECKING([configured backtracing method])
AC_MSG_RESULT([$backtrace_method])
if test "x$enable_prof" = "x1" ; then
dnl Heap profiling uses the log(3) function.
if test "x$LM" != "x" ; then
LIBS="$LIBS $LM"
fi
JE_APPEND_VS(LIBS, $LM)
AC_DEFINE([JEMALLOC_PROF], [ ])
fi
@ -1326,7 +1354,7 @@ if test "x$abi" != "xpecoff" ; then
AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
dnl Some systems may embed pthreads functionality in libc; check for libpthread
dnl first, but try libc too before failing.
AC_CHECK_LIB([pthread], [pthread_create], [LIBS="$LIBS -lpthread"],
AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -lpthread)],
[AC_SEARCH_LIBS([pthread_create], , ,
AC_MSG_ERROR([libpthread is missing]))])
JE_COMPILABLE([pthread_atfork(3)], [
@ -1339,7 +1367,7 @@ if test "x$abi" != "xpecoff" ; then
fi
fi
CPPFLAGS="$CPPFLAGS -D_REENTRANT"
JE_APPEND_VS(CPPFLAGS, -D_REENTRANT)
dnl Check whether clock_gettime(2) is in libc or librt.
AC_SEARCH_LIBS([clock_gettime], [rt])
@ -1348,13 +1376,13 @@ dnl Cray wrapper compiler often adds `-lrt` when using `-static`. Check with
dnl `-dynamic` as well in case a user tries to dynamically link in jemalloc
if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
if test "$ac_cv_search_clock_gettime" != "-lrt"; then
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_SAVE()
unset ac_cv_search_clock_gettime
JE_CFLAGS_APPEND([-dynamic])
JE_CFLAGS_ADD([-dynamic])
AC_SEARCH_LIBS([clock_gettime], [rt])
CFLAGS="${SAVED_CFLAGS}"
JE_CFLAGS_RESTORE()
fi
fi
@ -1410,8 +1438,8 @@ fi
if test "x$enable_syscall" = "x1" ; then
dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS
dnl X 10.12's deprecation warning prevents use.
SAVED_CFLAGS="${CFLAGS}"
JE_CFLAGS_APPEND([-Werror])
JE_CFLAGS_SAVE()
JE_CFLAGS_ADD([-Werror])
JE_COMPILABLE([syscall(2)], [
#include <sys/syscall.h>
#include <unistd.h>
@ -1419,7 +1447,7 @@ if test "x$enable_syscall" = "x1" ; then
syscall(SYS_write, 2, "hello", 5);
],
[je_cv_syscall])
CFLAGS="${SAVED_CFLAGS}"
JE_CFLAGS_RESTORE()
if test "x$je_cv_syscall" = "xyes" ; then
AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ])
fi
@ -1495,7 +1523,7 @@ if test "x$enable_lazy_lock" = "x1" ; then
if test "x$abi" != "xpecoff" ; then
AC_CHECK_HEADERS([dlfcn.h], , [AC_MSG_ERROR([dlfcn.h is missing])])
AC_CHECK_FUNC([dlsym], [],
[AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"],
[AC_CHECK_LIB([dl], [dlsym], [JE_APPEND_VS(LIBS, -ldl)],
[AC_MSG_ERROR([libdl is missing])])
])
fi
@ -1655,10 +1683,31 @@ if test "x${je_cv_madvise}" = "xyes" ; then
madvise((void *)0, 0, MADV_NOHUGEPAGE);
], [je_cv_thp])
if test "x${je_cv_thp}" = "xyes" ; then
AC_DEFINE([JEMALLOC_THP], [ ])
AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ])
fi
fi
dnl Enable transparent huge page support by default.
AC_ARG_ENABLE([thp],
[AS_HELP_STRING([--disable-thp],
[Disable transparent huge page support])],
[if test "x$enable_thp" = "xno" -o "x${je_cv_thp}" != "xyes" ; then
enable_thp="0"
else
enable_thp="1"
fi
],
[if test "x${je_cv_thp}" = "xyes" ; then
enable_thp="1"
else
enable_thp="0"
fi
])
if test "x$enable_thp" = "x1" ; then
AC_DEFINE([JEMALLOC_THP], [ ])
fi
AC_SUBST([enable_thp])
dnl ============================================================================
dnl Check whether __sync_{add,sub}_and_fetch() are available despite
dnl __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n macros being undefined.
@ -1774,37 +1823,6 @@ if test "x${enable_zone_allocator}" = "x1" ; then
AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
fi
AC_DEFINE([JEMALLOC_ZONE], [ ])
dnl The szone version jumped from 3 to 6 between the OS X 10.5.x and 10.6
dnl releases. malloc_zone_t and malloc_introspection_t have new fields in
dnl 10.6, which is the only source-level indication of the change.
AC_MSG_CHECKING([malloc zone version])
AC_DEFUN([JE_ZONE_PROGRAM],
[AC_LANG_PROGRAM(
[#include <malloc/malloc.h>],
[static int foo[[sizeof($1) $2 sizeof(void *) * $3 ? 1 : -1]]]
)])
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,14)],[JEMALLOC_ZONE_VERSION=3],[
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,15)],[JEMALLOC_ZONE_VERSION=5],[
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,16)],[
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,9)],[JEMALLOC_ZONE_VERSION=6],[
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_introspection_t,==,13)],[JEMALLOC_ZONE_VERSION=7],[JEMALLOC_ZONE_VERSION=]
)])],[
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,==,17)],[JEMALLOC_ZONE_VERSION=8],[
AC_COMPILE_IFELSE([JE_ZONE_PROGRAM(malloc_zone_t,>,17)],[JEMALLOC_ZONE_VERSION=9],[JEMALLOC_ZONE_VERSION=]
)])])])])
if test "x${JEMALLOC_ZONE_VERSION}" = "x"; then
AC_MSG_RESULT([unsupported])
AC_MSG_ERROR([Unsupported malloc zone version])
fi
if test "${JEMALLOC_ZONE_VERSION}" = 9; then
JEMALLOC_ZONE_VERSION=8
AC_MSG_RESULT([> 8])
else
AC_MSG_RESULT([$JEMALLOC_ZONE_VERSION])
fi
AC_DEFINE_UNQUOTED(JEMALLOC_ZONE_VERSION, [$JEMALLOC_ZONE_VERSION])
fi
dnl ============================================================================
@ -1978,7 +1996,8 @@ AC_MSG_RESULT([library revision : ${rev}])
AC_MSG_RESULT([])
AC_MSG_RESULT([CONFIG : ${CONFIG}])
AC_MSG_RESULT([CC : ${CC}])
AC_MSG_RESULT([CFLAGS : ${CFLAGS}])
AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}])
AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}])
AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}])
AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
@ -2016,6 +2035,7 @@ AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
AC_MSG_RESULT([tcache : ${enable_tcache}])
AC_MSG_RESULT([thp : ${enable_thp}])
AC_MSG_RESULT([fill : ${enable_fill}])
AC_MSG_RESULT([utrace : ${enable_utrace}])
AC_MSG_RESULT([valgrind : ${enable_valgrind}])

doc/jemalloc.xml.in

@ -850,6 +850,17 @@ for (i = 0; i < nbins; i++) {
during build configuration.</para></listitem>
</varlistentry>
<varlistentry id="config.thp">
<term>
<mallctl>config.thp</mallctl>
(<type>bool</type>)
<literal>r-</literal>
</term>
<listitem><para><option>--disable-thp</option> was not specified
during build configuration, and the system supports transparent huge
page manipulation.</para></listitem>
</varlistentry>
<varlistentry id="config.tls">
<term>
<mallctl>config.tls</mallctl>
@ -1162,6 +1173,21 @@ malloc_conf = "xmalloc:true";]]></programlisting>
forcefully disabled.</para></listitem>
</varlistentry>
<varlistentry id="opt.thp">
<term>
<mallctl>opt.thp</mallctl>
(<type>bool</type>)
<literal>r-</literal>
[<option>--enable-thp</option>]
</term>
<listitem><para>Transparent huge page (THP) integration
enabled/disabled. When enabled, THPs are explicitly disabled as a side
effect of unused dirty page purging for chunks that back small and/or
large allocations, because such chunks typically comprise active,
unused dirty, and untouched clean pages. This option is enabled by
default.</para></listitem>
</varlistentry>
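As a sketch of how a program might read this option through jemalloc's public
mallctl() interface (the option itself is new in this release; link against
jemalloc, error handling elided):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool thp;
	size_t sz = sizeof(thp);

	/* "opt.thp" is read-only, so newp/newlen are NULL/0. */
	if (mallctl("opt.thp", &thp, &sz, NULL, 0) == 0)
		printf("opt.thp: %s\n", thp ? "true" : "false");
	return (0);
}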
<varlistentry id="opt.lg_tcache_max">
<term>
<mallctl>opt.lg_tcache_max</mallctl>

include/jemalloc/internal/arena.h

@ -506,6 +506,7 @@ static const size_t large_pad =
#endif
;
extern bool opt_thp;
extern purge_mode_t opt_purge;
extern const char *purge_mode_names[];
extern ssize_t opt_lg_dirty_mult;

include/jemalloc/internal/chunk.h

@ -52,8 +52,8 @@ chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
bool chunk_register(tsdn_t *tsdn, const void *chunk,
const extent_node_t *node);
bool chunk_register(const void *chunk, const extent_node_t *node,
bool *gdump);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,

include/jemalloc/internal/extent.h

@ -75,6 +75,11 @@ typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
#ifdef JEMALLOC_JET
size_t extent_size_quantize_floor(size_t size);
#endif
size_t extent_size_quantize_ceil(size_t size);
rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t)
rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t)

include/jemalloc/internal/jemalloc_internal.h.in

@ -99,6 +99,13 @@ static const bool config_tcache =
false
#endif
;
static const bool config_thp =
#ifdef JEMALLOC_THP
true
#else
false
#endif
;
static const bool config_tls =
#ifdef JEMALLOC_TLS
true
@ -158,7 +165,6 @@ static const bool config_cache_oblivious =
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#include "jemalloc/internal/ph.h"

include/jemalloc/internal/jemalloc_internal_defs.h.in

@ -239,7 +239,6 @@
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
#undef JEMALLOC_ZONE
#undef JEMALLOC_ZONE_VERSION
/*
* Methods for determining whether the OS overcommits.
@ -253,6 +252,12 @@
/* Defined if madvise(2) is available. */
#undef JEMALLOC_HAVE_MADVISE
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
#undef JEMALLOC_HAVE_MADVISE_HUGE
/*
* Methods for purging unused pages differ between operating systems.
*
@ -265,10 +270,7 @@
#undef JEMALLOC_PURGE_MADVISE_FREE
#undef JEMALLOC_PURGE_MADVISE_DONTNEED
/*
* Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
* arguments to madvise(2).
*/
/* Defined if transparent huge page support is enabled. */
#undef JEMALLOC_THP
/* Define if operating system has alloca.h header. */

include/jemalloc/internal/mb.h

@ -76,7 +76,7 @@ mb_write(void)
: "memory" /* Clobbers. */
);
}
#elif defined(__sparc64__)
#elif defined(__sparc__) && defined(__arch64__)
JEMALLOC_INLINE void
mb_write(void)
{

include/jemalloc/internal/mutex.h

@ -85,8 +85,8 @@ JEMALLOC_INLINE void
malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_assert_not_owner(tsdn, &mutex->witness);
if (isthreaded) {
witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
@ -100,16 +100,16 @@ malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
#else
pthread_mutex_lock(&mutex->lock);
#endif
witness_lock(tsdn, &mutex->witness);
}
witness_lock(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
witness_unlock(tsdn, &mutex->witness);
if (isthreaded) {
witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
@ -130,16 +130,14 @@ JEMALLOC_INLINE void
malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_owner(tsdn, &mutex->witness);
witness_assert_owner(tsdn, &mutex->witness);
}
JEMALLOC_INLINE void
malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded)
witness_assert_not_owner(tsdn, &mutex->witness);
witness_assert_not_owner(tsdn, &mutex->witness);
}
#endif

include/jemalloc/internal/private_symbols.txt

@ -223,6 +223,8 @@ extent_node_sn_get
extent_node_sn_set
extent_node_zeroed_get
extent_node_zeroed_set
extent_size_quantize_ceil
extent_size_quantize_floor
extent_tree_ad_destroy
extent_tree_ad_destroy_recurse
extent_tree_ad_empty
@ -390,6 +392,7 @@ opt_quarantine
opt_redzone
opt_stats_print
opt_tcache
opt_thp
opt_utrace
opt_xmalloc
opt_zero
@ -529,6 +532,9 @@ tcache_flush
tcache_get
tcache_get_hard
tcache_maxclass
tcache_postfork_child
tcache_postfork_parent
tcache_prefork
tcache_salloc
tcache_stats_merge
tcaches
@ -612,14 +618,16 @@ valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
witness_assert_depth
witness_assert_depth_to_rank
witness_assert_lockless
witness_assert_not_owner
witness_assert_owner
witness_depth_error
witness_fork_cleanup
witness_init
witness_lock
witness_lock_error
witness_lockless_error
witness_not_owner_error
witness_owner
witness_owner_error

include/jemalloc/internal/tcache.h

@ -149,6 +149,9 @@ bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
bool tcache_boot(tsdn_t *tsdn);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/

include/jemalloc/internal/tsd.h

@ -479,13 +479,14 @@ a_name##tsd_wrapper_get(bool init) \
\
if (init && unlikely(wrapper == NULL)) { \
tsd_init_block_t block; \
wrapper = tsd_init_check_recursion( \
&a_name##tsd_init_head, &block); \
wrapper = (a_name##tsd_wrapper_t *) \
tsd_init_check_recursion(&a_name##tsd_init_head, \
&block); \
if (wrapper) \
return (wrapper); \
wrapper = (a_name##tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \
block.data = wrapper; \
block.data = (void *)wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \

include/jemalloc/internal/witness.h

@ -12,21 +12,32 @@ typedef int witness_comp_t (const witness_t *, const witness_t *);
*/
#define WITNESS_RANK_OMIT 0U
#define WITNESS_RANK_MIN 1U
#define WITNESS_RANK_INIT 1U
#define WITNESS_RANK_CTL 1U
#define WITNESS_RANK_ARENAS 2U
#define WITNESS_RANK_TCACHES 2U
#define WITNESS_RANK_ARENAS 3U
#define WITNESS_RANK_PROF_DUMP 3U
#define WITNESS_RANK_PROF_BT2GCTX 4U
#define WITNESS_RANK_PROF_TDATAS 5U
#define WITNESS_RANK_PROF_TDATA 6U
#define WITNESS_RANK_PROF_GCTX 7U
#define WITNESS_RANK_PROF_DUMP 4U
#define WITNESS_RANK_PROF_BT2GCTX 5U
#define WITNESS_RANK_PROF_TDATAS 6U
#define WITNESS_RANK_PROF_TDATA 7U
#define WITNESS_RANK_PROF_GCTX 8U
#define WITNESS_RANK_ARENA 8U
#define WITNESS_RANK_ARENA_CHUNKS 9U
#define WITNESS_RANK_ARENA_NODE_CACHE 10
/*
* Used as an argument to witness_assert_depth_to_rank() in order to validate
* depth excluding non-core locks with lower ranks. Since the rank argument to
* witness_assert_depth_to_rank() is inclusive rather than exclusive, this
* definition can have the same value as the minimally ranked core lock.
*/
#define WITNESS_RANK_CORE 9U
#define WITNESS_RANK_BASE 11U
#define WITNESS_RANK_ARENA 9U
#define WITNESS_RANK_ARENA_CHUNKS 10U
#define WITNESS_RANK_ARENA_NODE_CACHE 11U
#define WITNESS_RANK_BASE 12U
#define WITNESS_RANK_LEAF 0xffffffffU
#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
@ -91,10 +102,12 @@ extern witness_not_owner_error_t *witness_not_owner_error;
void witness_not_owner_error(const witness_t *witness);
#endif
#ifdef JEMALLOC_JET
typedef void (witness_lockless_error_t)(const witness_list_t *);
extern witness_lockless_error_t *witness_lockless_error;
typedef void (witness_depth_error_t)(const witness_list_t *,
witness_rank_t rank_inclusive, unsigned depth);
extern witness_depth_error_t *witness_depth_error;
#else
void witness_lockless_error(const witness_list_t *witnesses);
void witness_depth_error(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth);
#endif
void witnesses_cleanup(tsd_t *tsd);
@ -111,6 +124,9 @@ void witness_postfork_child(tsd_t *tsd);
bool witness_owner(tsd_t *tsd, const witness_t *witness);
void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
void witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth);
void witness_assert_depth(tsdn_t *tsdn, unsigned depth);
void witness_assert_lockless(tsdn_t *tsdn);
void witness_lock(tsdn_t *tsdn, witness_t *witness);
void witness_unlock(tsdn_t *tsdn, witness_t *witness);
@ -123,6 +139,8 @@ witness_owner(tsd_t *tsd, const witness_t *witness)
witness_list_t *witnesses;
witness_t *w;
cassert(config_debug);
witnesses = tsd_witnessesp_get(tsd);
ql_foreach(w, witnesses, link) {
if (w == witness)
@ -175,9 +193,10 @@ witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn)
{
witness_assert_depth_to_rank(tsdn_t *tsdn, witness_rank_t rank_inclusive,
unsigned depth) {
tsd_t *tsd;
unsigned d;
witness_list_t *witnesses;
witness_t *w;
@ -188,10 +207,29 @@ witness_assert_lockless(tsdn_t *tsdn)
return;
tsd = tsdn_tsd(tsdn);
d = 0;
witnesses = tsd_witnessesp_get(tsd);
w = ql_last(witnesses, link);
if (w != NULL)
witness_lockless_error(witnesses);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
break;
}
d++;
}
}
if (d != depth)
witness_depth_error(witnesses, rank_inclusive, depth);
}
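A self-contained toy model of the depth computation above (simplified types,
not jemalloc's): walk backwards from the most recently acquired witness,
counting until a rank below rank_inclusive is seen:

#include <assert.h>
#include <stddef.h>

typedef struct witness_s {
	unsigned		rank;
	struct witness_s	*prev;	/* previously acquired lock */
} witness_t;

static unsigned
depth_to_rank(const witness_t *last, unsigned rank_inclusive)
{
	unsigned d = 0;
	const witness_t *w;

	for (w = last; w != NULL && w->rank >= rank_inclusive; w = w->prev)
		d++;
	return (d);
}

int
main(void)
{
	witness_t ctl = {1, NULL};	/* WITNESS_RANK_CTL */
	witness_t arena = {9, &ctl};	/* WITNESS_RANK_ARENA, acquired last */

	/* With WITNESS_RANK_CORE == 9U, only the arena lock is counted: */
	assert(depth_to_rank(&arena, 9) == 1);
	assert(depth_to_rank(&arena, 1) == 2);	/* every held lock */
	return (0);
}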
JEMALLOC_INLINE void
witness_assert_depth(tsdn_t *tsdn, unsigned depth) {
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_MIN, depth);
}
JEMALLOC_INLINE void
witness_assert_lockless(tsdn_t *tsdn) {
witness_assert_depth(tsdn, 0);
}
JEMALLOC_INLINE void

msvc/projects/vc2015/test_threads/test_threads.cpp (mode changed from executable to normal file; no content changes)

scripts/gen_travis.py (new executable file, 85 lines)

@ -0,0 +1,85 @@
#!/usr/bin/env python
from itertools import combinations
travis_template = """\
language: generic
matrix:
include:
%s
before_script:
- autoconf
- ./configure ${COMPILER_FLAGS:+ \
CC="$CC $COMPILER_FLAGS" } \
$CONFIGURE_FLAGS
- make -j3
- make -j3 tests
script:
- make check
"""
# The 'default' configuration is gcc, on linux, with no compiler or configure
# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
# --disable-stats, and --disable-tcache. To avoid abusing travis though, we
# don't test all 2**7 = 128 possible combinations of these; instead, we only
# test combinations of up to MAX_UNUSUAL_OPTIONS = 2 'unusual' settings at
# once, for C(7, 0) + C(7, 1) + C(7, 2) = 29 configurations, under the hope
# that bugs involving interactions of such settings are rare.
MAX_UNUSUAL_OPTIONS = 2
os_default = 'linux'
os_unusual = 'osx'
compilers_default = 'CC=gcc'
compilers_unusual = 'CC=clang'
compiler_flag_unusuals = ['-m32']
configure_flag_unusuals = [
'--enable-debug', '--enable-prof', '--disable-stats', '--disable-tcache',
]
all_unusuals = (
[os_unusual] + [compilers_unusual] + compiler_flag_unusuals
+ configure_flag_unusuals
)
unusual_combinations_to_test = []
for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
unusual_combinations_to_test += combinations(all_unusuals, i)
include_rows = ""
for unusual_combination in unusual_combinations_to_test:
os = os_default
if os_unusual in unusual_combination:
os = os_unusual
compilers = compilers_default
if compilers_unusual in unusual_combination:
compilers = compilers_unusual
compiler_flags = [
x for x in unusual_combination if x in compiler_flag_unusuals]
configure_flags = [
x for x in unusual_combination if x in configure_flag_unusuals]
# Filter out an unsupported configuration - heap profiling on OS X.
if os == 'osx' and '--enable-prof' in configure_flags:
continue
env_string = '{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}"'.format(
compilers, " ".join(compiler_flags), " ".join(configure_flags))
include_rows += ' - os: %s\n' % os
include_rows += ' env: %s\n' % env_string
if '-m32' in unusual_combination and os == 'linux':
include_rows += ' addons:\n'
include_rows += ' apt:\n'
include_rows += ' packages:\n'
include_rows += ' - gcc-multilib\n'
print travis_template % include_rows
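Presumably the script is run with its output redirected over .travis.yml
(e.g. python scripts/gen_travis.py > .travis.yml); the print statement and
xrange mark it as Python 2.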

src/arena.c

@ -4,6 +4,8 @@
/******************************************************************************/
/* Data. */
bool opt_thp = true;
static bool thp_initially_huge;
purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
"ratio",
@ -568,8 +570,8 @@ arena_chunk_init_spare(arena_t *arena)
}
static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
size_t sn, bool zero)
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
bool *gdump)
{
/*
@ -580,7 +582,7 @@ arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
extent_node_achunk_set(&chunk->node, true);
return (chunk_register(tsdn, chunk, &chunk->node));
return (chunk_register(chunk, &chunk->node, gdump));
}
static arena_chunk_t *
@ -591,6 +593,8 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
size_t sn;
malloc_mutex_unlock(tsdn, &arena->lock);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
NULL, chunksize, chunksize, &sn, zero, commit);
@ -603,16 +607,20 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
chunk = NULL;
}
}
if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
*zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
if (chunk != NULL) {
bool gdump;
if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0,
map_bias << LG_PAGE, arena->ind);
}
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
(void *)chunk, chunksize, sn, *zero, *commit);
chunk = NULL;
}
chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
chunksize, sn, *zero, *commit);
chunk = NULL;
if (config_prof && opt_prof && gdump)
prof_gdump(tsdn);
}
malloc_mutex_lock(tsdn, &arena->lock);
@ -627,14 +635,24 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t sn;
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
malloc_mutex_assert_owner(tsdn, &arena->lock);
chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
chunksize, &sn, zero, commit, true);
if (chunk != NULL) {
if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
bool gdump;
if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
chunksize, sn, true);
return (NULL);
}
if (config_prof && opt_prof && gdump) {
malloc_mutex_unlock(tsdn, &arena->lock);
prof_gdump(tsdn);
malloc_mutex_lock(tsdn, &arena->lock);
}
}
if (chunk == NULL) {
chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
@ -664,7 +682,9 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
if (chunk == NULL)
return (NULL);
chunk->hugepage = true;
if (config_thp && opt_thp) {
chunk->hugepage = thp_initially_huge;
}
/*
* Initialize the map to contain one maximal free untouched run. Mark
@ -729,14 +749,17 @@ arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
size_t sn, hugepage;
size_t sn;
UNUSED bool hugepage JEMALLOC_CC_SILENCE_INIT(false);
bool committed;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
chunk_deregister(chunk, &chunk->node);
sn = extent_node_sn_get(&chunk->node);
hugepage = chunk->hugepage;
if (config_thp && opt_thp) {
hugepage = chunk->hugepage;
}
committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
if (!committed) {
/*
@ -749,13 +772,16 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
arena->ind);
}
if (!hugepage) {
if (config_thp && opt_thp && hugepage != thp_initially_huge) {
/*
* Convert chunk back to the default state, so that all
* subsequent chunk allocations start out with chunks that can
* be backed by transparent huge pages.
* Convert chunk back to initial THP state, so that all
* subsequent chunk allocations start out in a consistent state.
*/
pages_huge(chunk, chunksize);
if (thp_initially_huge) {
pages_huge(chunk, chunksize);
} else {
pages_nohuge(chunk, chunksize);
}
}
chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
@ -1695,13 +1721,13 @@ arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
/*
* If this is the first run purged within chunk, mark
* the chunk as non-huge. This will prevent all use of
* transparent huge pages for this chunk until the chunk
* as a whole is deallocated.
* the chunk as non-THP-capable. This will prevent all
* use of THPs for this chunk until the chunk as a whole
* is deallocated.
*/
if (chunk->hugepage) {
pages_nohuge(chunk, chunksize);
chunk->hugepage = false;
if (config_thp && opt_thp && chunk->hugepage) {
chunk->hugepage = pages_nohuge(chunk,
chunksize);
}
assert(pageind + npages <= chunk_npages);
@ -2694,6 +2720,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
return (arena_malloc_small(tsdn, arena, ind, zero));
if (likely(size <= large_maxclass))
return (arena_malloc_large(tsdn, arena, ind, zero));
assert(index2size(ind) >= chunksize);
return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
@ -3755,11 +3782,78 @@ bin_info_init(void)
#undef SC
}
static void
init_thp_initially_huge(void) {
int fd;
char buf[sizeof("[always] madvise never\n")];
ssize_t nread;
static const char *enabled_states[] = {
"[always] madvise never\n",
"always [madvise] never\n",
"always madvise [never]\n"
};
static const bool thp_initially_huge_states[] = {
true,
false,
false
};
unsigned i;
if (config_debug) {
for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
i++) {
assert(sizeof(buf) > strlen(enabled_states[i]));
}
}
assert(sizeof(enabled_states)/sizeof(const char *) ==
sizeof(thp_initially_huge_states)/sizeof(bool));
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
fd = (int)syscall(SYS_open,
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
if (fd == -1) {
goto label_error;
}
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
#else
nread = read(fd, &buf, sizeof(buf));
#endif
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
syscall(SYS_close, fd);
#else
close(fd);
#endif
if (nread < 1) {
goto label_error;
}
for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
i++) {
if (strncmp(buf, enabled_states[i], (size_t)nread) == 0) {
thp_initially_huge = thp_initially_huge_states[i];
return;
}
}
label_error:
thp_initially_huge = false;
}
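A standalone approximation of the same probe, using stdio rather than the
raw-syscall paths above (which presumably exist so the probe cannot recurse
into the allocator during bootstrap):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[sizeof("[always] madvise never\n")];
	size_t nread;
	FILE *f;

	f = fopen("/sys/kernel/mm/transparent_hugepage/enabled", "r");
	if (f == NULL) {
		puts("unknown; assume pages start non-huge");
		return (0);
	}
	nread = fread(buf, 1, sizeof(buf) - 1, f);
	fclose(f);
	buf[nread] = '\0';
	/* "[always]" means new mappings start out backed by THPs. */
	puts(strncmp(buf, "[always]", 8) == 0 ?
	    "pages start huge" : "pages start non-huge");
	return (0);
}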
void
arena_boot(void)
{
unsigned i;
if (config_thp && opt_thp) {
init_thp_initially_huge();
}
arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
arena_decay_time_default_set(opt_decay_time);
@ -3790,15 +3884,8 @@ arena_boot(void)
arena_maxrun = chunksize - (map_bias << LG_PAGE);
assert(arena_maxrun > 0);
large_maxclass = index2size(size2index(chunksize)-1);
if (large_maxclass > arena_maxrun) {
/*
* For small chunk sizes it's possible for there to be fewer
* non-header pages available than are necessary to serve the
* size classes just below chunksize.
*/
large_maxclass = arena_maxrun;
}
assert(large_maxclass > 0);
assert(large_maxclass + large_pad <= arena_maxrun);
nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
nhclasses = NSIZES - nlclasses - NBINS;

src/chunk.c

@ -141,7 +141,7 @@ chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
}
bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
chunk_register(const void *chunk, const extent_node_t *node, bool *gdump)
{
assert(extent_node_addr_get(node) == chunk);
@ -160,8 +160,7 @@ chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
*/
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
prof_gdump(tsdn);
*gdump = (cur > high && prof_gdump_get_unlocked());
}
return (false);
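chunk_register() now reports, via *gdump, whether a dump threshold was
crossed instead of calling prof_gdump() itself; callers trigger the dump only
after releasing their locks. A generic sketch of the pattern with
illustrative names (the real code uses atomics rather than a mutex):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static size_t cur, high;

static void
register_one(void)
{
	bool gdump;

	pthread_mutex_lock(&mtx);
	cur++;
	gdump = (cur > high);
	if (gdump)
		high = cur;
	pthread_mutex_unlock(&mtx);
	if (gdump)	/* dump with no locks held: no lock-order reversal */
		puts("gdump");
}

int
main(void)
{
	register_one();
	return (0);
}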
@ -189,12 +188,17 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
extent_node_t *node;
size_t qsize;
extent_node_t key;
assert(size == CHUNK_CEILING(size));
extent_node_init(&key, arena, NULL, size, 0, false, false);
return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
qsize = extent_size_quantize_ceil(size);
extent_node_init(&key, arena, NULL, qsize, 0, false, false);
node = extent_tree_szsnad_nsearch(chunks_szsnad, &key);
assert(node == NULL || extent_node_size_get(node) >= size);
return node;
}
static void *

src/chunk_dss.c

@ -115,8 +115,9 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
* malloc.
*/
while (true) {
void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
size_t gap_size, cpad_size;
void *ret, *max_cur, *dss_next, *dss_prev;
void *gap_addr_chunk, *gap_addr_subchunk;
size_t gap_size_chunk, gap_size_subchunk;
intptr_t incr;
max_cur = chunk_dss_max_update(new_addr);
@ -124,25 +125,32 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
goto label_oom;
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
chunksize_mask;
/*
* Compute how much chunk-aligned pad space (if any) is
* Compute how much chunk-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
* recycled for later use.
*/
cpad = (void *)((uintptr_t)dss_max + gap_size);
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
alignment);
cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
gap_addr_chunk = (void *)(CHUNK_CEILING(
(uintptr_t)max_cur));
ret = (void *)ALIGNMENT_CEILING(
(uintptr_t)gap_addr_chunk, alignment);
gap_size_chunk = (uintptr_t)ret -
(uintptr_t)gap_addr_chunk;
/*
* Compute the address just past the end of the desired
* allocation space.
*/
dss_next = (void *)((uintptr_t)ret + size);
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max)
if ((uintptr_t)ret < (uintptr_t)max_cur ||
(uintptr_t)dss_next < (uintptr_t)max_cur)
goto label_oom; /* Wrap-around. */
incr = gap_size + cpad_size + size;
/* Compute the increment, including subchunk bytes. */
gap_addr_subchunk = max_cur;
gap_size_subchunk = (uintptr_t)ret -
(uintptr_t)gap_addr_subchunk;
incr = gap_size_subchunk + size;
assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
size);
/*
* Optimistically update dss_max, and roll back below if
@ -157,11 +165,12 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == max_cur) {
/* Success. */
if (cpad_size != 0) {
if (gap_size_chunk != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
&chunk_hooks, gap_addr_chunk,
gap_size_chunk,
arena_extent_sn_next(arena), false,
true);
}
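A worked instance of the rewritten gap arithmetic, assuming the default 2 MiB
chunks, chunk-sized alignment, and a hypothetical DSS end address:

#include <assert.h>
#include <stdint.h>

#define CHUNKSIZE	((uintptr_t)1 << 21)	/* assumed 2 MiB chunks */
#define CEILING(a, b)	(((a) + (b) - 1) & ~((b) - 1))

int
main(void)
{
	uintptr_t max_cur = 0x200123;	/* hypothetical current DSS end */
	uintptr_t gap_addr_chunk = CEILING(max_cur, CHUNKSIZE);
	uintptr_t ret = CEILING(gap_addr_chunk, CHUNKSIZE);
	uintptr_t gap_size_chunk = ret - gap_addr_chunk;
	uintptr_t gap_size_subchunk = ret - max_cur;

	assert(gap_addr_chunk == 0x400000);	/* next chunk boundary */
	assert(gap_size_chunk == 0);		/* nothing to recycle here */
	/* sbrk(2) must advance past the sub-chunk gap plus the request: */
	assert(gap_size_subchunk == 0x1ffedd);
	return (0);
}

Note that the rewritten code bases every quantity on max_cur, the snapshot of
the DSS end, instead of re-reading the shared dss_max as the old code did.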

src/ctl.c

@ -84,6 +84,7 @@ CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_thp)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
@ -104,6 +105,7 @@ CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
@ -258,6 +260,7 @@ static const ctl_named_node_t config_node[] = {
{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)},
{NAME("tcache"), CTL(config_tcache)},
{NAME("thp"), CTL(config_thp)},
{NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)},
{NAME("valgrind"), CTL(config_valgrind)},
@ -281,6 +284,7 @@ static const ctl_named_node_t opt_node[] = {
{NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("thp"), CTL(opt_thp)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
@ -1268,6 +1272,7 @@ CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
CTL_RO_CONFIG_GEN(config_stats, bool)
CTL_RO_CONFIG_GEN(config_tcache, bool)
CTL_RO_CONFIG_GEN(config_thp, bool)
CTL_RO_CONFIG_GEN(config_tls, bool)
CTL_RO_CONFIG_GEN(config_utrace, bool)
CTL_RO_CONFIG_GEN(config_valgrind, bool)
@ -1291,6 +1296,7 @@ CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_thp, opt_thp, opt_thp, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@ -1476,7 +1482,6 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
if (!config_tcache)
return (ENOENT);
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (tcaches_create(tsd, &tcache_ind)) {
ret = EFAULT;
@ -1486,8 +1491,7 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
return ret;
}
static int

src/extent.c

@ -3,13 +3,11 @@
/******************************************************************************/
/*
* Round down to the nearest chunk size that can actually be requested during
* normal huge allocation.
*/
JEMALLOC_INLINE_C size_t
extent_quantize(size_t size)
{
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
size_t ret;
szind_t ind;
@ -25,11 +23,32 @@ extent_quantize(size_t size)
return (ret);
}
size_t
extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
ret = extent_size_quantize_floor(size);
if (ret < size) {
/*
* Skip a quantization that may have an adequately large extent,
* because under-sized extents may be mixed in. This only
* happens when an unusual size is requested, i.e. for aligned
* allocation, and is just one of several places where linear
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = index2size(size2index(ret + 1));
}
return ret;
}
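The floor/ceil pair is easiest to picture with a toy quantizer over
power-of-two classes (jemalloc's real size classes are finer-grained; this
only illustrates the relationship the search-side assertion in src/chunk.c
relies on):

#include <assert.h>
#include <stddef.h>

static size_t
quantize_floor(size_t size)	/* largest class <= size */
{
	size_t q = 1;

	while (q * 2 <= size)
		q *= 2;
	return (q);
}

static size_t
quantize_ceil(size_t size)	/* smallest class >= size */
{
	size_t q = quantize_floor(size);

	return ((q < size) ? q * 2 : q);
}

int
main(void)
{
	assert(quantize_floor(12) == 8);
	assert(quantize_ceil(12) == 16);	/* skip possibly undersized class */
	assert(quantize_ceil(16) == 16);	/* exact sizes map to themselves */
	return (0);
}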
JEMALLOC_INLINE_C int
extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
{
size_t a_qsize = extent_quantize(extent_node_size_get(a));
size_t b_qsize = extent_quantize(extent_node_size_get(b));
size_t a_qsize = extent_size_quantize_floor(extent_node_size_get(a));
size_t b_qsize = extent_size_quantize_floor(extent_node_size_get(b));
return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
}

src/huge.c

@ -15,20 +15,20 @@ huge_node_get(const void *ptr)
}
static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
return (chunk_register(tsdn, ptr, node));
return (chunk_register(ptr, node, gdump));
}
static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node, bool *gdump)
{
bool err;
err = huge_node_set(tsdn, ptr, node);
err = huge_node_set(tsdn, ptr, node, gdump);
assert(!err);
}
@ -57,11 +57,13 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
arena_t *iarena;
extent_node_t *node;
size_t sn;
bool is_zeroed;
bool is_zeroed, gdump;
/* Allocate one or more contiguous chunks for this request. */
assert(!tsdn_null(tsdn) || arena != NULL);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
@ -91,11 +93,13 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
if (huge_node_set(tsdn, ret, node)) {
if (huge_node_set(tsdn, ret, node, &gdump)) {
arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
if (config_prof && opt_prof && gdump)
prof_gdump(tsdn);
/* Insert node into huge. */
malloc_mutex_lock(tsdn, &arena->huge_mtx);
@ -144,7 +148,10 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
extent_node_t *node;
arena_t *arena;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
bool pre_zeroed, post_zeroed;
bool pre_zeroed, post_zeroed, gdump;
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
/* Increase usize to incorporate extra. */
for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
@ -178,10 +185,13 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
huge_node_reset(tsdn, ptr, node, &gdump);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* gdump without any locks held. */
if (config_prof && opt_prof && gdump)
prof_gdump(tsdn);
arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
@ -207,7 +217,7 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
arena_t *arena;
chunk_hooks_t chunk_hooks;
size_t cdiff;
bool pre_zeroed, post_zeroed;
bool pre_zeroed, post_zeroed, gdump;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
@ -215,6 +225,8 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
/* Split excess chunks. */
cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
@ -241,10 +253,13 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
/* Update the size of the huge allocation. */
huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
huge_node_reset(tsdn, ptr, node);
huge_node_reset(tsdn, ptr, node, &gdump);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* gdump without any locks held. */
if (config_prof && opt_prof && gdump)
prof_gdump(tsdn);
/* Zap the excess chunks. */
arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
@ -258,7 +273,7 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
bool is_zeroed_subchunk, is_zeroed_chunk, gdump;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
@ -266,6 +281,9 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
is_zeroed_subchunk = extent_node_zeroed_get(node);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
/*
* Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
* update extent's zeroed field, and zero as necessary.
@ -280,8 +298,11 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
extent_node_size_set(node, usize);
extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
is_zeroed_chunk);
huge_node_reset(tsdn, ptr, node);
huge_node_reset(tsdn, ptr, node, &gdump);
malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* gdump without any locks held. */
if (config_prof && opt_prof && gdump)
prof_gdump(tsdn);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
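
The hunks above all follow one pattern: while arena->huge_mtx (or the chunk radix tree lock) is held, chunk_register()/huge_node_reset() merely record through the gdump out-parameter that a profile dump became due, and prof_gdump() runs only after every lock is dropped, which is exactly what the witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0) assertions enforce. A self-contained model of that deferral, using pthreads in place of jemalloc's mutex and witness machinery (all names here are illustrative, not jemalloc API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t huge_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for chunk_register(): under the lock it may only record
 * that a dump became due, never trigger one. */
static bool
node_register(bool *gdump)
{
	*gdump = true;	/* pretend the accumulated threshold was crossed */
	return (false);	/* no error */
}

/* Stands in for prof_gdump(), which may take its own locks. */
static void
profile_dump(void)
{
	puts("gdump");
}

static void
huge_update(void)
{
	bool gdump;

	pthread_mutex_lock(&huge_mtx);
	node_register(&gdump);		/* record the decision... */
	pthread_mutex_unlock(&huge_mtx);

	if (gdump)			/* ...and act on it lock-free */
		profile_dump();
}

int
main(void)
{
	huge_update();
	return (0);
}
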

src/jemalloc.c
View File

@ -790,18 +790,19 @@ stats_print_atexit(void)
* Begin initialization functions.
*/
#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
jemalloc_secure_getenv(const char *name)
{
#ifdef JEMALLOC_HAVE_SECURE_GETENV
return secure_getenv(name);
#else
# ifdef JEMALLOC_HAVE_ISSETUGID
if (issetugid() != 0)
return (NULL);
# endif
return (getenv(name));
}
#endif
}
static unsigned
malloc_ncpus(void)
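
Because the removed and added lines are interleaved above, the resulting wrapper is easier to read whole. A best-effort reconstruction of the new function (renamed so it cannot collide with the secure_getenv() that glibc >= 2.17 declares):

#include <stdlib.h>	/* getenv(); secure_getenv() on glibc >= 2.17 */

static char *
jemalloc_secure_getenv(const char *name)
{
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
#  ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0)
		return (NULL);	/* ignore the environment when setuid/setgid */
#  endif
	return (getenv(name));
#endif
}
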
@ -1018,7 +1019,7 @@ malloc_conf_init(void)
#endif
;
if ((opts = secure_getenv(envname)) != NULL) {
if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
@ -1074,18 +1075,18 @@ malloc_conf_init(void)
k, klen, v, vlen); \
} else if (clip) { \
if (CONF_MIN_##check_min(um, \
(min))) \
(t)(min))) \
o = (t)(min); \
else if (CONF_MAX_##check_max( \
um, (max))) \
um, (t)(max))) \
o = (t)(max); \
else \
o = (t)um; \
} else { \
if (CONF_MIN_##check_min(um, \
(min)) || \
(t)(min)) || \
CONF_MAX_##check_max(um, \
(max))) { \
(t)(max))) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
@ -1135,16 +1136,18 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_abort, "abort", true)
/*
* Chunks always require at least one header page,
* as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
* possibly an additional page in the presence of
* redzones. In order to simplify options processing,
* use a conservative bound that accommodates all these
* constraints.
* Chunks always require at least one header page, as
* many as 2^(LG_SIZE_CLASS_GROUP+1) data pages (plus an
* additional page in the presence of cache-oblivious
* large), and possibly an additional page in the
* presence of redzones. In order to simplify options
* processing, use a conservative bound that
* accommodates all these constraints.
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
(sizeof(size_t) << 3) - 1, yes, yes, true)
LG_SIZE_CLASS_GROUP + 1 + ((config_cache_oblivious
|| config_fill) ? 1 : 0), (sizeof(size_t) << 3) - 1,
yes, yes, true)
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
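
As a worked instance of the revised lower bound, assume LG_PAGE = 12 (4 KiB pages), LG_SIZE_CLASS_GROUP = 2, and a build with cache-oblivious large allocation enabled (values are illustrative, not read from any build):

#include <stdio.h>

int
main(void)
{
	int lg_page = 12;		/* 4 KiB pages */
	int lg_size_class_group = 2;
	int cache_oblivious = 1, fill = 0;

	int lg_chunk_min = lg_page + lg_size_class_group + 1 +
	    ((cache_oblivious || fill) ? 1 : 0);
	printf("minimum lg_chunk = %d (%d KiB chunks)\n", lg_chunk_min,
	    (1 << lg_chunk_min) >> 10);	/* 16 -> 64 KiB */
	return (0);
}
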
@ -1269,6 +1272,9 @@ malloc_conf_init(void)
"lg_tcache_max", -1,
(sizeof(size_t) << 3) - 1)
}
if (config_thp) {
CONF_HANDLE_BOOL(opt_thp, "thp", true)
}
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof", true)
CONF_HANDLE_CHAR_P(opt_prof_prefix,
@ -2827,6 +2833,7 @@ _malloc_prefork(void)
witness_prefork(tsd);
/* Acquire all mutexes in a safe order. */
ctl_prefork(tsd_tsdn(tsd));
tcache_prefork(tsd_tsdn(tsd));
malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
prof_prefork0(tsd_tsdn(tsd));
for (i = 0; i < 3; i++) {
@ -2886,6 +2893,7 @@ _malloc_postfork(void)
}
prof_postfork_parent(tsd_tsdn(tsd));
malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
tcache_postfork_parent(tsd_tsdn(tsd));
ctl_postfork_parent(tsd_tsdn(tsd));
}
@ -2910,6 +2918,7 @@ jemalloc_postfork_child(void)
}
prof_postfork_child(tsd_tsdn(tsd));
malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
tcache_postfork_child(tsd_tsdn(tsd));
ctl_postfork_child(tsd_tsdn(tsd));
}

src/pages.c
View File

@ -199,7 +199,7 @@ pages_huge(void *addr, size_t size)
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_THP
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
#else
return (false);
@ -213,7 +213,7 @@ pages_nohuge(void *addr, size_t size)
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
#ifdef JEMALLOC_THP
#ifdef JEMALLOC_HAVE_MADVISE_HUGE
return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
#else
return (false);
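
The guard rename reflects what is actually being probed: availability of the MADV_HUGEPAGE/MADV_NOHUGEPAGE madvise() flags. Minimal stand-alone use of the wrapped calls (Linux-only, and a sketch rather than jemalloc code):

#include <sys/mman.h>
#include <stddef.h>

int
main(void)
{
	size_t size = 2u << 20;	/* one 2 MiB huge page worth of memory */
	void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED)
		return (1);
#ifdef MADV_HUGEPAGE
	(void)madvise(addr, size, MADV_HUGEPAGE);	/* opt in to THP */
	(void)madvise(addr, size, MADV_NOHUGEPAGE);	/* opt back out */
#endif
	return (munmap(addr, size));
}
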

src/stats.c Executable file → Normal file
View File

@ -39,7 +39,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool large, bool huge, unsigned i)
{
size_t page;
bool config_tcache, in_gap, in_gap_prev;
bool in_gap, in_gap_prev;
unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
@ -49,7 +49,6 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t\"bins\": [\n");
} else {
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
"bins: size ind allocated nmalloc"
@ -137,8 +136,16 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
availregs = nregs * curruns;
milli = (availregs != 0) ? (1000 * curregs) / availregs
: 1000;
assert(milli <= 1000);
if (milli < 10) {
if (milli > 1000) {
/*
* Race detected: the counters were read in
* separate mallctl calls and concurrent
* operations happened in between. In this case
* no meaningful utilization can be computed.
*/
malloc_snprintf(util, sizeof(util), " race");
} else if (milli < 10) {
malloc_snprintf(util, sizeof(util),
"0.00%zu", milli);
} else if (milli < 100) {
@ -147,8 +154,10 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
} else if (milli < 1000) {
malloc_snprintf(util, sizeof(util), "0.%zu",
milli);
} else
} else {
assert(milli == 1000);
malloc_snprintf(util, sizeof(util), "1");
}
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
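
The utilization column is a fixed-point rendering of curregs/availregs in thousandths; the new branch converts an impossible ratio (counters fetched by separate mallctl calls that raced) into an explicit marker instead of tripping the old assertion. The same logic stand-alone (the helper name is invented for illustration):

#include <stdio.h>
#include <stddef.h>

static void
format_util(char *buf, size_t len, size_t curregs, size_t availregs)
{
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

	if (milli > 1000) {
		snprintf(buf, len, " race");	/* racing counters */
	} else if (milli < 10) {
		snprintf(buf, len, "0.00%zu", milli);
	} else if (milli < 100) {
		snprintf(buf, len, "0.0%zu", milli);
	} else if (milli < 1000) {
		snprintf(buf, len, "0.%zu", milli);
	} else {
		snprintf(buf, len, "1");
	}
}

int
main(void)
{
	char util[8];

	format_util(util, sizeof(util), 512, 1024);
	printf("%s\n", util);	/* 0.500 */
	format_util(util, sizeof(util), 2048, 1024);
	printf("%s\n", util);	/* race */
	return (0);
}
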
@ -536,7 +545,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
"\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\t},\n");
"\t\t\t\t}%s\n", (bins || large || huge) ? "," : "");
} else {
malloc_cprintf(write_cb, cbopaque,
"metadata: mapped: %zu, allocated: %zu\n",
@ -555,7 +564,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
static void
stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
bool json, bool merged, bool unmerged)
bool json, bool more)
{
const char *cpv;
bool bv;
@ -741,6 +750,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_BOOL(xmalloc, ",")
OPT_WRITE_BOOL(tcache, ",")
OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
OPT_WRITE_BOOL(thp, ",")
OPT_WRITE_BOOL(prof, ",")
OPT_WRITE_CHAR_P(prof_prefix, ",")
OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
@ -838,9 +848,11 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nbins\": %u,\n", nbins);
CTL_GET("arenas.nhbins", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nhbins\": %u,\n", uv);
if (config_tcache) {
CTL_GET("arenas.nhbins", &uv, unsigned);
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"nhbins\": %u,\n", uv);
}
malloc_cprintf(write_cb, cbopaque,
"\t\t\t\"bin\": [\n");
@ -907,11 +919,11 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
"\t\t\t]\n");
malloc_cprintf(write_cb, cbopaque,
"\t\t},\n");
"\t\t}%s\n", (config_prof || more) ? "," : "");
}
/* prof. */
if (json) {
if (config_prof && json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\"prof\": {\n");
@ -937,8 +949,7 @@ stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
"\t\t\t\"lg_sample\": %zd\n", ssv);
malloc_cprintf(write_cb, cbopaque,
"\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
"");
"\t\t}%s\n", more ? "," : "");
}
}
@ -1023,31 +1034,37 @@ stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
narenas, bins, large, huge);
if (json) {
malloc_cprintf(write_cb, cbopaque,
"\t\t\t}%s\n", (ninitialized > 1) ?
"," : "");
"\t\t\t}%s\n", unmerged ? "," :
"");
}
}
/* Unmerged stats. */
for (i = j = 0; i < narenas; i++) {
if (initialized[i]) {
if (json) {
j++;
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t\"%u\": {\n", i);
} else {
malloc_cprintf(write_cb,
cbopaque, "\narenas[%u]:\n",
i);
}
stats_arena_print(write_cb, cbopaque,
json, i, bins, large, huge);
if (json) {
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t}%s\n", (j <
ninitialized) ? "," : "");
if (unmerged) {
for (i = j = 0; i < narenas; i++) {
if (initialized[i]) {
if (json) {
j++;
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t\"%u\": {\n",
i);
} else {
malloc_cprintf(write_cb,
cbopaque,
"\narenas[%u]:\n",
i);
}
stats_arena_print(write_cb,
cbopaque, json, i, bins,
large, huge);
if (json) {
malloc_cprintf(write_cb,
cbopaque,
"\t\t\t}%s\n", (j <
ninitialized) ? ","
: "");
}
}
}
}
@ -1069,8 +1086,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
size_t u64sz;
bool json = false;
bool general = true;
bool merged = true;
bool unmerged = true;
bool merged = config_stats;
bool unmerged = config_stats;
bool bins = true;
bool large = true;
bool huge = true;
@ -1137,8 +1154,10 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
"___ Begin jemalloc statistics ___\n");
}
if (general)
stats_general_print(write_cb, cbopaque, json, merged, unmerged);
if (general) {
bool more = (merged || unmerged);
stats_general_print(write_cb, cbopaque, json, more);
}
if (config_stats) {
stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
bins, large, huge);
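
Much of the churn above serves JSON validity: a member may be suffixed with ',' only when another member will actually follow, so each section now receives a flag saying whether anything comes after it. The rule in miniature:

#include <stdio.h>

int
main(void)
{
	int merged = 1, unmerged = 0;	/* which sections will be emitted */

	printf("{\n");
	if (merged)
		printf("\t\"merged\": {}%s\n", unmerged ? "," : "");
	if (unmerged)
		printf("\t\"0\": {}\n");
	printf("}\n");	/* valid JSON for any flag combination */
	return (0);
}
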

src/tcache.c Executable file → Normal file
View File

@ -21,6 +21,9 @@ static unsigned tcaches_past;
/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;
/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t tcaches_mtx;
/******************************************************************************/
size_t
@ -444,29 +447,56 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
}
}
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
static bool
tcaches_create_prep(tsd_t *tsd) {
bool err;
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches == NULL) {
tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
if (tcaches == NULL) {
err = true;
goto label_return;
}
}
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL))
return (true);
tcache = tcache_create(tsd_tsdn(tsd), arena);
if (tcache == NULL)
return (true);
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
err = true;
goto label_return;
}
err = false;
label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
return err;
}
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
bool err;
arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches_create_prep(tsd)) {
err = true;
goto label_return;
}
arena = arena_ichoose(tsd, NULL);
if (unlikely(arena == NULL)) {
err = true;
goto label_return;
}
tcache = tcache_create(tsd_tsdn(tsd), arena);
if (tcache == NULL) {
err = true;
goto label_return;
}
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches_avail != NULL) {
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
@ -478,41 +508,50 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
*r_ind = tcaches_past;
tcaches_past++;
}
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
return (false);
err = false;
label_return:
malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &tcaches_mtx);
return err;
}
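
Splitting out tcaches_create_prep() keeps tcaches_mtx from being held across tcache_create(), which allocates and can take other locks. A self-contained model of the same check / unlocked-allocate / publish shape (array-backed for brevity; the re-check at publish time covers the window where the lock was dropped):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define NSLOTS 8

static pthread_mutex_t slots_mtx = PTHREAD_MUTEX_INITIALIZER;
static void *slots[NSLOTS];
static unsigned nslots_used;

static bool
slot_create(unsigned *r_ind)
{
	bool full;
	void *obj;

	pthread_mutex_lock(&slots_mtx);		/* prep: capacity check */
	full = (nslots_used >= NSLOTS);
	pthread_mutex_unlock(&slots_mtx);
	if (full)
		return (true);

	obj = malloc(64);			/* allocate with no locks held */
	if (obj == NULL)
		return (true);

	pthread_mutex_lock(&slots_mtx);		/* publish */
	if (nslots_used < NSLOTS) {
		slots[nslots_used] = obj;
		*r_ind = nslots_used++;
		pthread_mutex_unlock(&slots_mtx);
		return (false);
	}
	pthread_mutex_unlock(&slots_mtx);
	free(obj);				/* lost a race while unlocked */
	return (true);
}

int
main(void)
{
	unsigned ind;

	return (slot_create(&ind) ? 1 : 0);
}
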
static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
if (elm->tcache == NULL)
if (elm->tcache == NULL) {
return;
}
tcache_destroy(tsd, elm->tcache);
elm->tcache = NULL;
}
void
tcaches_flush(tsd_t *tsd, unsigned ind)
{
tcaches_flush(tsd_t *tsd, unsigned ind) {
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
tcaches_elm_flush(tsd, &tcaches[ind]);
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
}
void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
tcaches_destroy(tsd_t *tsd, unsigned ind) {
tcaches_t *elm;
malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
elm = &tcaches[ind];
tcaches_elm_flush(tsd, elm);
elm->next = tcaches_avail;
tcaches_avail = elm;
malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
}
bool
tcache_boot(tsdn_t *tsdn)
{
tcache_boot(tsdn_t *tsdn) {
unsigned i;
cassert(config_tcache);
/*
* If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
* known.
@ -524,6 +563,10 @@ tcache_boot(tsdn_t *tsdn)
else
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES)) {
return true;
}
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
@ -553,3 +596,24 @@ tcache_boot(tsdn_t *tsdn)
return (false);
}
void
tcache_prefork(tsdn_t *tsdn) {
if (!config_prof && opt_tcache) {
malloc_mutex_prefork(tsdn, &tcaches_mtx);
}
}
void
tcache_postfork_parent(tsdn_t *tsdn) {
if (!config_prof && opt_tcache) {
malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
}
}
void
tcache_postfork_child(tsdn_t *tsdn) {
if (!config_prof && opt_tcache) {
malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
}
}
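
The new fork hooks make tcaches_mtx fork-safe: it is acquired before fork() so no thread can hold it at the moment the child's address space is snapshotted, then released in the parent and reinitialized in the child. The same discipline modeled with pthread_atfork() (jemalloc wires this through _malloc_prefork()/_malloc_postfork() instead):

#include <pthread.h>
#include <unistd.h>
#include <sys/wait.h>

static pthread_mutex_t tcaches_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	pthread_mutex_lock(&tcaches_mtx);	/* quiesce before fork() */
}

static void
postfork_parent(void)
{
	pthread_mutex_unlock(&tcaches_mtx);
}

static void
postfork_child(void)
{
	/* The child inherits a copy of the locked mutex; start fresh. */
	pthread_mutex_init(&tcaches_mtx, NULL);
}

int
main(void)
{
	pthread_atfork(prefork, postfork_parent, postfork_child);
	if (fork() == 0)
		_exit(0);
	wait(NULL);
	return (0);
}
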

src/util.c Executable file → Normal file
View File

src/witness.c
View File

@ -71,15 +71,16 @@ witness_not_owner_error_t *witness_not_owner_error =
#endif
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
#undef witness_depth_error
#define witness_depth_error JEMALLOC_N(n_witness_depth_error)
#endif
void
witness_lockless_error(const witness_list_t *witnesses)
{
witness_depth_error(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
witness_t *w;
malloc_printf("<jemalloc>: Should not own any locks:");
malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
(depth != 1) ? "s" : "", rank_inclusive);
ql_foreach(w, witnesses, link) {
malloc_printf(" %s(%u)", w->name, w->rank);
}
@ -87,10 +88,9 @@ witness_lockless_error(const witness_list_t *witnesses)
abort();
}
#ifdef JEMALLOC_JET
#undef witness_lockless_error
#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
witness_lockless_error_t *witness_lockless_error =
JEMALLOC_N(n_witness_lockless_error);
#undef witness_depth_error
#define witness_depth_error JEMALLOC_N(witness_depth_error)
witness_depth_error_t *witness_depth_error = JEMALLOC_N(n_witness_depth_error);
#endif
void

src/zone.c
View File

@ -3,6 +3,75 @@
# error "This source file is for zones on Darwin (OS X)."
#endif
/* Definitions of the following structs in malloc/malloc.h might be too old
* for the built binary to run on newer versions of OSX. So use the newest
* possible version of those structs.
*/
typedef struct _malloc_zone_t {
void *reserved1;
void *reserved2;
size_t (*size)(struct _malloc_zone_t *, const void *);
void *(*malloc)(struct _malloc_zone_t *, size_t);
void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
void *(*valloc)(struct _malloc_zone_t *, size_t);
void (*free)(struct _malloc_zone_t *, void *);
void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
void (*destroy)(struct _malloc_zone_t *);
const char *zone_name;
unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
struct malloc_introspection_t *introspect;
unsigned version;
void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
} malloc_zone_t;
typedef struct {
vm_address_t address;
vm_size_t size;
} vm_range_t;
typedef struct malloc_statistics_t {
unsigned blocks_in_use;
size_t size_in_use;
size_t max_size_in_use;
size_t size_allocated;
} malloc_statistics_t;
typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
typedef struct malloc_introspection_t {
kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
size_t (*good_size)(malloc_zone_t *, size_t);
boolean_t (*check)(malloc_zone_t *);
void (*print)(malloc_zone_t *, boolean_t);
void (*log)(malloc_zone_t *, void *);
void (*force_lock)(malloc_zone_t *);
void (*force_unlock)(malloc_zone_t *);
void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
boolean_t (*zone_locked)(malloc_zone_t *);
boolean_t (*enable_discharge_checking)(malloc_zone_t *);
boolean_t (*disable_discharge_checking)(malloc_zone_t *);
void (*discharge)(malloc_zone_t *, void *);
#ifdef __BLOCKS__
void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
#else
void *enumerate_unavailable_without_blocks;
#endif
void (*reinit_lock)(malloc_zone_t *);
} malloc_introspection_t;
extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
extern malloc_zone_t *malloc_default_zone(void);
extern void malloc_zone_register(malloc_zone_t *zone);
extern void malloc_zone_unregister(malloc_zone_t *zone);
/*
* The malloc_default_purgeable_zone() function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
@ -20,24 +89,35 @@ static struct malloc_introspection_t jemalloc_zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static size_t zone_size(malloc_zone_t *zone, void *ptr);
static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static void zone_destroy(malloc_zone_t *zone);
static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
void **results, unsigned num_requested);
static void zone_batch_free(struct _malloc_zone_t *zone,
void **to_be_freed, unsigned num_to_be_freed);
static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder);
static boolean_t zone_check(malloc_zone_t *zone);
static void zone_print(malloc_zone_t *zone, boolean_t verbose);
static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static void zone_statistics(malloc_zone_t *zone,
malloc_statistics_t *stats);
static boolean_t zone_locked(malloc_zone_t *zone);
static void zone_reinit_lock(malloc_zone_t *zone);
/******************************************************************************/
/*
@ -45,7 +125,7 @@ static void zone_force_unlock(malloc_zone_t *zone);
*/
static size_t
zone_size(malloc_zone_t *zone, void *ptr)
zone_size(malloc_zone_t *zone, const void *ptr)
{
/*
@ -106,7 +186,6 @@ zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
return (realloc(ptr, size));
}
#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
@ -116,9 +195,7 @@ zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
return (ret);
}
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
@ -133,15 +210,46 @@ zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
free(ptr);
}
#endif
static void *
static void
zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
not_reached();
return (NULL);
}
static unsigned
zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
unsigned num_requested)
{
unsigned i;
for (i = 0; i < num_requested; i++) {
results[i] = je_malloc(size);
if (!results[i])
break;
}
return i;
}
static void
zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
unsigned num_to_be_freed)
{
unsigned i;
for (i = 0; i < num_to_be_freed; i++) {
zone_free(zone, to_be_freed[i]);
to_be_freed[i] = NULL;
}
}
static size_t
zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal)
{
return 0;
}
static size_t
@ -153,6 +261,30 @@ zone_good_size(malloc_zone_t *zone, size_t size)
return (s2u(size));
}
static kern_return_t
zone_enumerator(task_t task, void *data, unsigned type_mask,
vm_address_t zone_address, memory_reader_t reader,
vm_range_recorder_t recorder)
{
return KERN_SUCCESS;
}
static boolean_t
zone_check(malloc_zone_t *zone)
{
return true;
}
static void
zone_print(malloc_zone_t *zone, boolean_t verbose)
{
}
static void
zone_log(malloc_zone_t *zone, void *address)
{
}
static void
zone_force_lock(malloc_zone_t *zone)
{
@ -176,53 +308,69 @@ zone_force_unlock(malloc_zone_t *zone)
jemalloc_postfork_child();
}
static void
zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats)
{
/* We make no effort to actually fill the values */
stats->blocks_in_use = 0;
stats->size_in_use = 0;
stats->max_size_in_use = 0;
stats->size_allocated = 0;
}
static boolean_t
zone_locked(malloc_zone_t *zone)
{
/* Pretend no lock is being held */
return false;
}
static void
zone_reinit_lock(malloc_zone_t *zone)
{
/* As of OSX 10.12, this function is only used when force_unlock would
* be used if the zone version were < 9. So just use force_unlock. */
zone_force_unlock(zone);
}
static void
zone_init(void)
{
jemalloc_zone.size = (void *)zone_size;
jemalloc_zone.malloc = (void *)zone_malloc;
jemalloc_zone.calloc = (void *)zone_calloc;
jemalloc_zone.valloc = (void *)zone_valloc;
jemalloc_zone.free = (void *)zone_free;
jemalloc_zone.realloc = (void *)zone_realloc;
jemalloc_zone.destroy = (void *)zone_destroy;
jemalloc_zone.size = zone_size;
jemalloc_zone.malloc = zone_malloc;
jemalloc_zone.calloc = zone_calloc;
jemalloc_zone.valloc = zone_valloc;
jemalloc_zone.free = zone_free;
jemalloc_zone.realloc = zone_realloc;
jemalloc_zone.destroy = zone_destroy;
jemalloc_zone.zone_name = "jemalloc_zone";
jemalloc_zone.batch_malloc = NULL;
jemalloc_zone.batch_free = NULL;
jemalloc_zone.batch_malloc = zone_batch_malloc;
jemalloc_zone.batch_free = zone_batch_free;
jemalloc_zone.introspect = &jemalloc_zone_introspect;
jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
jemalloc_zone.version = 9;
jemalloc_zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
jemalloc_zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
jemalloc_zone.pressure_relief = NULL;
#endif
jemalloc_zone.pressure_relief = zone_pressure_relief;
jemalloc_zone_introspect.enumerator = NULL;
jemalloc_zone_introspect.good_size = (void *)zone_good_size;
jemalloc_zone_introspect.check = NULL;
jemalloc_zone_introspect.print = NULL;
jemalloc_zone_introspect.log = NULL;
jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
jemalloc_zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
jemalloc_zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
jemalloc_zone_introspect.enumerator = zone_enumerator;
jemalloc_zone_introspect.good_size = zone_good_size;
jemalloc_zone_introspect.check = zone_check;
jemalloc_zone_introspect.print = zone_print;
jemalloc_zone_introspect.log = zone_log;
jemalloc_zone_introspect.force_lock = zone_force_lock;
jemalloc_zone_introspect.force_unlock = zone_force_unlock;
jemalloc_zone_introspect.statistics = zone_statistics;
jemalloc_zone_introspect.zone_locked = zone_locked;
jemalloc_zone_introspect.enable_discharge_checking = NULL;
jemalloc_zone_introspect.disable_discharge_checking = NULL;
jemalloc_zone_introspect.discharge = NULL;
# ifdef __BLOCKS__
#ifdef __BLOCKS__
jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
# else
#else
jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
# endif
#endif
jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
}
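
Hard-coding version 9 with a fully populated vtable works because libmalloc only consults the function pointers that the declared struct version promises are present; the old #if-laddered code could declare a version whose tail slots were NULL. A toy model of version-gated struct extension (not the Darwin API itself):

#include <stdio.h>

typedef struct {
	unsigned version;
	void (*basic)(void);		/* present in every version */
	void (*pressure_relief)(void);	/* only consulted when version >= 8 */
	void (*reinit_lock)(void);	/* only consulted when version >= 9 */
} zone_like_t;

static void basic(void) { puts("basic"); }
static void relief(void) { puts("relief"); }
static void reinit(void) { puts("reinit"); }

int
main(void)
{
	zone_like_t z = { 9, basic, relief, reinit };

	z.basic();
	if (z.version >= 8)
		z.pressure_relief();	/* safe: slot is populated */
	if (z.version >= 9)
		z.reinit_lock();
	return (0);
}
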
static malloc_zone_t *

test/integration/MALLOCX_ARENA.c Executable file → Normal file
View File

test/integration/allocated.c Executable file → Normal file
View File

test/integration/chunk.c
View File

@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif
static chunk_hooks_t orig_hooks;
static chunk_hooks_t old_hooks;

test/integration/chunk.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="junk:false"
fi

test/integration/mallocx.c Executable file → Normal file
View File

@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif
static unsigned
get_nsizes_impl(const char *cmd)
{

test/integration/mallocx.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="junk:false"
fi

test/integration/overflow.c Executable file → Normal file
View File

test/integration/rallocx.c Executable file → Normal file
View File

test/integration/thread_arena.c Executable file → Normal file
View File

test/integration/thread_tcache_enabled.c Executable file → Normal file
View File

test/integration/xallocx.c Executable file → Normal file
View File

@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf = "junk:false";
#endif
/*
* Use a separate arena for xallocx() extension/contraction tests so that
* internal allocation e.g. by heap profiling can't interpose allocations where

test/integration/xallocx.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="junk:false"
fi

test/test.sh.in
View File

@ -11,6 +11,18 @@ case @abi@ in
;;
esac
# Make a copy of the @JEMALLOC_CPREFIX@MALLOC_CONF passed in to this script, so
# it can be repeatedly concatenated with per test settings.
export MALLOC_CONF_ALL=${@JEMALLOC_CPREFIX@MALLOC_CONF}
# Concatenate the individual test's MALLOC_CONF and MALLOC_CONF_ALL.
export_malloc_conf() {
if [ "x${MALLOC_CONF}" != "x" -a "x${MALLOC_CONF_ALL}" != "x" ] ; then
export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF},${MALLOC_CONF_ALL}"
else
export @JEMALLOC_CPREFIX@MALLOC_CONF="${MALLOC_CONF}${MALLOC_CONF_ALL}"
fi
}
# Corresponds to test_status_t.
pass_code=0
skip_code=1
@ -24,7 +36,22 @@ for t in $@; do
echo
fi
echo "=== ${t} ==="
${t}@exe@ @abs_srcroot@ @abs_objroot@
if [ -e "@srcroot@${t}.sh" ] ; then
# Source the shell script corresponding to the test in a subshell and
# execute the test. This allows the shell script to set MALLOC_CONF, which
# is then used to set @JEMALLOC_CPREFIX@MALLOC_CONF (thus allowing the
# per test shell script to ignore the @JEMALLOC_CPREFIX@ detail).
$(enable_fill=@enable_fill@ \
enable_prof=@enable_prof@ \
enable_tcache=@enable_tcache@ \
. @srcroot@${t}.sh && \
export_malloc_conf && \
${t}@exe@ @abs_srcroot@ @abs_objroot@)
else
$(export MALLOC_CONF= && \
export_malloc_conf &&
${t}@exe@ @abs_srcroot@ @abs_objroot@)
fi
result_code=$?
case ${result_code} in
${pass_code})

test/unit/arena_reset.c Executable file → Normal file
View File

@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf = "prof:true,lg_prof_sample:0";
#endif
static unsigned
get_nsizes_impl(const char *cmd)
{

test/unit/arena_reset.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,lg_prof_sample:0"
fi

test/unit/decay.c Executable file → Normal file
View File

@ -1,7 +1,5 @@
#include "test/jemalloc_test.h"
const char *malloc_conf = "purge:decay,decay_time:1";
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

test/unit/decay.sh Normal file
View File

@ -0,0 +1,3 @@
#!/bin/sh
export MALLOC_CONF="purge:decay,decay_time:1"

View File

@ -0,0 +1,98 @@
#include "test/jemalloc_test.h"
TEST_BEGIN(test_huge_extent_size) {
unsigned nhchunks, i;
size_t sz, extent_size_prev, ceil_prev;
size_t mib[4];
size_t miblen = sizeof(mib) / sizeof(size_t);
/*
* Iterate over all huge size classes, get their extent sizes, and
* verify that the quantized size is the same as the extent size.
*/
sz = sizeof(unsigned);
assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL,
0), 0, "Unexpected mallctl failure");
assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0,
"Unexpected mallctlnametomib failure");
for (i = 0; i < nhchunks; i++) {
size_t extent_size, floor, ceil;
mib[2] = i;
sz = sizeof(size_t);
assert_d_eq(mallctlbymib(mib, miblen, (void *)&extent_size,
&sz, NULL, 0), 0, "Unexpected mallctlbymib failure");
floor = extent_size_quantize_floor(extent_size);
ceil = extent_size_quantize_ceil(extent_size);
assert_zu_eq(extent_size, floor,
"Extent quantization should be a no-op for precise size "
"(extent_size=%zu)", extent_size);
assert_zu_eq(extent_size, ceil,
"Extent quantization should be a no-op for precise size "
"(extent_size=%zu)", extent_size);
if (i > 0) {
assert_zu_eq(extent_size_prev,
extent_size_quantize_floor(extent_size - PAGE),
"Floor should be a precise size");
if (extent_size_prev < ceil_prev) {
assert_zu_eq(ceil_prev, extent_size,
"Ceiling should be a precise size "
"(extent_size_prev=%zu, ceil_prev=%zu, "
"extent_size=%zu)", extent_size_prev,
ceil_prev, extent_size);
}
}
if (i + 1 < nhchunks) {
extent_size_prev = floor;
ceil_prev = extent_size_quantize_ceil(extent_size +
PAGE);
}
}
}
TEST_END
TEST_BEGIN(test_monotonic) {
#define SZ_MAX ZU(4 * 1024 * 1024)
unsigned i;
size_t floor_prev, ceil_prev;
floor_prev = 0;
ceil_prev = 0;
for (i = 1; i <= SZ_MAX >> LG_PAGE; i++) {
size_t extent_size, floor, ceil;
extent_size = i << LG_PAGE;
floor = extent_size_quantize_floor(extent_size);
ceil = extent_size_quantize_ceil(extent_size);
assert_zu_le(floor, extent_size,
"Floor should be <= (floor=%zu, extent_size=%zu, ceil=%zu)",
floor, extent_size, ceil);
assert_zu_ge(ceil, extent_size,
"Ceiling should be >= (floor=%zu, extent_size=%zu, "
"ceil=%zu)", floor, extent_size, ceil);
assert_zu_le(floor_prev, floor, "Floor should be monotonic "
"(floor_prev=%zu, floor=%zu, extent_size=%zu, ceil=%zu)",
floor_prev, floor, extent_size, ceil);
assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic "
"(floor=%zu, extent_size=%zu, ceil_prev=%zu, ceil=%zu)",
floor, extent_size, ceil_prev, ceil);
floor_prev = floor;
ceil_prev = ceil;
}
}
TEST_END
int
main(void) {
return test(
test_huge_extent_size,
test_monotonic);
}
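
The two tests pin down the quantizer's contract: floor(s) <= s <= ceil(s), both maps monotonic, and precise sizes mapping to themselves. A toy quantizer over page multiples satisfies the same contract (the real one rounds to huge size-class boundaries; PAGE here is an assumed 4 KiB):

#include <assert.h>
#include <stddef.h>

#define PAGE ((size_t)4096)

static size_t
quantize_floor(size_t s)
{
	return (s & ~(PAGE - 1));
}

static size_t
quantize_ceil(size_t s)
{
	return ((s + PAGE - 1) & ~(PAGE - 1));
}

int
main(void)
{
	size_t floor_prev = 0, ceil_prev = 0;
	size_t s;

	for (s = PAGE; s <= 64 * PAGE; s += PAGE / 2) {
		size_t f = quantize_floor(s);
		size_t c = quantize_ceil(s);

		assert(f <= s && s <= c);		/* bracketing */
		if (s % PAGE == 0)
			assert(f == s && c == s);	/* precise sizes */
		assert(floor_prev <= f && ceil_prev <= c);	/* monotone */
		floor_prev = f;
		ceil_prev = c;
	}
	return (0);
}
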

test/unit/junk.c
View File

@ -1,13 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
# ifndef JEMALLOC_TEST_JUNK_OPT
# define JEMALLOC_TEST_JUNK_OPT "junk:true"
# endif
const char *malloc_conf =
"abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT;
#endif
static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig;
static huge_dalloc_junk_t *huge_dalloc_junk_orig;

test/unit/junk.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="abort:false,zero:false,redzone:true,quarantine:0,junk:true"
fi

test/unit/junk_alloc.c
View File

@ -1,3 +1 @@
#define JEMALLOC_TEST_JUNK_OPT "junk:alloc"
#include "junk.c"
#undef JEMALLOC_TEST_JUNK_OPT

test/unit/junk_alloc.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="abort:false,zero:false,redzone:true,quarantine:0,junk:alloc"
fi

test/unit/junk_free.c
View File

@ -1,3 +1 @@
#define JEMALLOC_TEST_JUNK_OPT "junk:free"
#include "junk.c"
#undef JEMALLOC_TEST_JUNK_OPT

test/unit/junk_free.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="abort:false,zero:false,redzone:true,quarantine:0,junk:free"
fi

test/unit/lg_chunk.c
View File

@ -1,12 +1,5 @@
#include "test/jemalloc_test.h"
/*
* Make sure that opt.lg_chunk clamping is sufficient. In practice, this test
* program will fail a debug assertion during initialization and abort (rather
* than the test soft-failing) if clamping is insufficient.
*/
const char *malloc_conf = "lg_chunk:0";
TEST_BEGIN(test_lg_chunk_clamp)
{
void *p;

test/unit/lg_chunk.sh Normal file
View File

@ -0,0 +1,6 @@
#!/bin/sh
# Make sure that opt.lg_chunk clamping is sufficient. In practice, this test
# program will fail a debug assertion during initialization and abort (rather
# than the test soft-failing) if clamping is insufficient.
export MALLOC_CONF="lg_chunk:0"

test/unit/mallctl.c Executable file → Normal file
View File

@ -142,6 +142,7 @@ TEST_BEGIN(test_mallctl_config)
TEST_MALLCTL_CONFIG(prof_libunwind, bool);
TEST_MALLCTL_CONFIG(stats, bool);
TEST_MALLCTL_CONFIG(tcache, bool);
TEST_MALLCTL_CONFIG(thp, bool);
TEST_MALLCTL_CONFIG(tls, bool);
TEST_MALLCTL_CONFIG(utrace, bool);
TEST_MALLCTL_CONFIG(valgrind, bool);
@ -182,6 +183,7 @@ TEST_BEGIN(test_mallctl_opt)
TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
TEST_MALLCTL_OPT(bool, tcache, tcache);
TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache);
TEST_MALLCTL_OPT(bool, thp, thp);
TEST_MALLCTL_OPT(bool, prof, prof);
TEST_MALLCTL_OPT(const char *, prof_prefix, prof);
TEST_MALLCTL_OPT(bool, prof_active, prof);

test/unit/pack.c
View File

@ -1,13 +1,5 @@
#include "test/jemalloc_test.h"
const char *malloc_conf =
/* Use smallest possible chunk size. */
"lg_chunk:0"
/* Immediately purge to minimize fragmentation. */
",lg_dirty_mult:-1"
",decay_time:-1"
;
/*
* Size class that is a divisor of the page size, ideally 4+ regions per run.
*/

test/unit/pack.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
# Use smallest possible chunk size. Immediately purge to minimize
# fragmentation.
export MALLOC_CONF="lg_chunk:0,lg_dirty_mult:-1,decay_time:-1"

test/unit/prof_accum.c Executable file → Normal file
View File

@ -5,11 +5,6 @@
#define DUMP_INTERVAL 1
#define BT_COUNT_CHECK_INTERVAL 5
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0";
#endif
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{

test/unit/prof_accum.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0"
fi

test/unit/prof_active.c Executable file → Normal file
View File

@ -1,10 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_thread_active_init:false,lg_prof_sample:0";
#endif
static void
mallctl_bool_get(const char *name, bool expected, const char *func, int line)
{

test/unit/prof_active.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,prof_thread_active_init:false,lg_prof_sample:0"
fi

test/unit/prof_gdump.c Executable file → Normal file
View File

@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true";
#endif
static bool did_prof_dump_open;
static int

test/unit/prof_gdump.sh Normal file
View File

@ -0,0 +1,6 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,prof_active:false,prof_gdump:true"
fi

test/unit/prof_idump.c Executable file → Normal file
View File

@ -1,11 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,"
"lg_prof_interval:0";
#endif
static bool did_prof_dump_open;
static int

test/unit/prof_idump.sh Normal file
View File

@ -0,0 +1,7 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,prof_accum:true,prof_active:false,lg_prof_sample:0,lg_prof_interval:0"
fi

test/unit/prof_reset.c Executable file → Normal file
View File

@ -1,10 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf =
"prof:true,prof_active:false,lg_prof_sample:0";
#endif
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{

test/unit/prof_reset.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,prof_active:false,lg_prof_sample:0"
fi

test/unit/prof_tctx.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,lg_prof_sample:0"
fi

test/unit/prof_thread_name.c Executable file → Normal file
View File

@ -1,9 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_PROF
const char *malloc_conf = "prof:true,prof_active:false";
#endif
static void
mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func,
int line)

test/unit/prof_thread_name.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_prof}" = "x1" ] ; then
export MALLOC_CONF="prof:true,prof_active:false"
fi

test/unit/quarantine.c
View File

@ -1,13 +1,7 @@
#include "test/jemalloc_test.h"
/* Keep in sync with definition in quarantine.sh. */
#define QUARANTINE_SIZE 8192
#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)
#ifdef JEMALLOC_FILL
const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:"
STRINGIFY(QUARANTINE_SIZE);
#endif
void
quarantine_clear(void)

test/unit/quarantine.sh Normal file
View File

@ -0,0 +1,8 @@
#!/bin/sh
# Keep in sync with definition in quarantine.c.
export QUARANTINE_SIZE=8192
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="abort:false,junk:true,redzone:true,quarantine:${QUARANTINE_SIZE}"
fi

test/unit/size_classes.c Executable file → Normal file
View File

test/unit/stats.c Executable file → Normal file
View File

test/unit/stats_print.c Normal file

File diff suppressed because it is too large

test/unit/witness.c
View File

@ -3,12 +3,12 @@
static witness_lock_error_t *witness_lock_error_orig;
static witness_owner_error_t *witness_owner_error_orig;
static witness_not_owner_error_t *witness_not_owner_error_orig;
static witness_lockless_error_t *witness_lockless_error_orig;
static witness_depth_error_t *witness_depth_error_orig;
static bool saw_lock_error;
static bool saw_owner_error;
static bool saw_not_owner_error;
static bool saw_lockless_error;
static bool saw_depth_error;
static void
witness_lock_error_intercept(const witness_list_t *witnesses,
@ -33,10 +33,9 @@ witness_not_owner_error_intercept(const witness_t *witness)
}
static void
witness_lockless_error_intercept(const witness_list_t *witnesses)
{
saw_lockless_error = true;
witness_depth_error_intercept(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
saw_depth_error = true;
}
static int
@ -67,21 +66,36 @@ TEST_BEGIN(test_witness)
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 0);
witness_init(&a, "a", 1, NULL);
witness_assert_not_owner(tsdn, &a);
witness_lock(tsdn, &a);
witness_assert_owner(tsdn, &a);
witness_assert_depth(tsdn, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 0);
witness_init(&b, "b", 2, NULL);
witness_assert_not_owner(tsdn, &b);
witness_lock(tsdn, &b);
witness_assert_owner(tsdn, &b);
witness_assert_depth(tsdn, 2);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 2);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)3U, 0);
witness_unlock(tsdn, &a);
witness_assert_depth(tsdn, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)2U, 1);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)3U, 0);
witness_unlock(tsdn, &b);
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_assert_depth_to_rank(tsdn, (witness_rank_t)1U, 0);
}
TEST_END
@ -100,12 +114,15 @@ TEST_BEGIN(test_witness_comp)
witness_assert_not_owner(tsdn, &a);
witness_lock(tsdn, &a);
witness_assert_owner(tsdn, &a);
witness_assert_depth(tsdn, 1);
witness_init(&b, "b", 1, witness_comp);
witness_assert_not_owner(tsdn, &b);
witness_lock(tsdn, &b);
witness_assert_owner(tsdn, &b);
witness_assert_depth(tsdn, 2);
witness_unlock(tsdn, &b);
witness_assert_depth(tsdn, 1);
witness_lock_error_orig = witness_lock_error;
witness_lock_error = witness_lock_error_intercept;
@ -117,6 +134,7 @@ TEST_BEGIN(test_witness_comp)
witness_lock(tsdn, &c);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &c);
witness_assert_depth(tsdn, 1);
saw_lock_error = false;
@ -126,6 +144,7 @@ TEST_BEGIN(test_witness_comp)
witness_lock(tsdn, &d);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &d);
witness_assert_depth(tsdn, 1);
witness_unlock(tsdn, &a);
@ -154,11 +173,13 @@ TEST_BEGIN(test_witness_reversal)
witness_init(&b, "b", 2, NULL);
witness_lock(tsdn, &b);
witness_assert_depth(tsdn, 1);
assert_false(saw_lock_error, "Unexpected witness lock error");
witness_lock(tsdn, &a);
assert_true(saw_lock_error, "Expected witness lock error");
witness_unlock(tsdn, &a);
witness_assert_depth(tsdn, 1);
witness_unlock(tsdn, &b);
witness_assert_lockless(tsdn);
@ -232,35 +253,38 @@ TEST_BEGIN(test_witness_unlock_not_owned)
}
TEST_END
TEST_BEGIN(test_witness_lockful)
{
TEST_BEGIN(test_witness_depth) {
witness_t a;
tsdn_t *tsdn;
test_skip_if(!config_debug);
witness_lockless_error_orig = witness_lockless_error;
witness_lockless_error = witness_lockless_error_intercept;
saw_lockless_error = false;
witness_depth_error_orig = witness_depth_error;
witness_depth_error = witness_depth_error_intercept;
saw_depth_error = false;
tsdn = tsdn_fetch();
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_init(&a, "a", 1, NULL);
assert_false(saw_lockless_error, "Unexpected lockless error");
assert_false(saw_depth_error, "Unexpected depth error");
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_lock(tsdn, &a);
witness_assert_lockless(tsdn);
assert_true(saw_lockless_error, "Expected lockless error");
witness_assert_depth(tsdn, 0);
assert_true(saw_depth_error, "Expected depth error");
witness_unlock(tsdn, &a);
witness_assert_lockless(tsdn);
witness_assert_depth(tsdn, 0);
witness_lockless_error = witness_lockless_error_orig;
witness_depth_error = witness_depth_error_orig;
}
TEST_END
@ -268,11 +292,11 @@ int
main(void)
{
return (test(
return test(
test_witness,
test_witness_comp,
test_witness_reversal,
test_witness_recursive,
test_witness_unlock_not_owned,
test_witness_lockful));
test_witness_depth);
}
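
witness_assert_depth_to_rank(tsdn, r, d) asserts that exactly d witnesses of rank >= r are held, generalizing the removed all-or-nothing lockless check (which was just depth 0 at rank 1). The counting rule the test exercises, in miniature:

#include <assert.h>

typedef struct {
	unsigned rank;
	int held;
} witness_model_t;

/* Count held witnesses whose rank is at or above rank_inclusive. */
static unsigned
depth_to_rank(const witness_model_t *ws, unsigned n, unsigned rank_inclusive)
{
	unsigned i, depth = 0;

	for (i = 0; i < n; i++) {
		if (ws[i].held && ws[i].rank >= rank_inclusive)
			depth++;
	}
	return (depth);
}

int
main(void)
{
	witness_model_t ws[] = { { 1, 1 }, { 2, 1 } };	/* a and b locked */

	assert(depth_to_rank(ws, 2, 1) == 2);	/* both count at rank 1 */
	assert(depth_to_rank(ws, 2, 2) == 1);	/* only b counts */
	assert(depth_to_rank(ws, 2, 3) == 0);	/* "lockless" above rank 2 */
	return (0);
}
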

test/unit/zero.c
View File

@ -1,10 +1,5 @@
#include "test/jemalloc_test.h"
#ifdef JEMALLOC_FILL
const char *malloc_conf =
"abort:false,junk:false,zero:true,redzone:false,quarantine:0";
#endif
static void
test_zero(size_t sz_min, size_t sz_max)
{

test/unit/zero.sh Normal file
View File

@ -0,0 +1,5 @@
#!/bin/sh
if [ "x${enable_fill}" = "x1" ] ; then
export MALLOC_CONF="abort:false,junk:false,zero:true,redzone:false,quarantine:0"
fi