Remove Valgrind support.

Jason Evans 2016-04-05 16:25:44 -07:00
parent a397045323
commit 9a8add1510
23 changed files with 33 additions and 409 deletions

INSTALL

@@ -169,9 +169,6 @@ any of the following arguments (not a definitive list) to 'configure':
     See the "opt.junk", "opt.zero", "opt.quarantine", and "opt.redzone" option
     documentation for usage details.
 
---disable-valgrind
-    Disable support for Valgrind.
-
 --disable-zone-allocator
     Disable zone allocator for Darwin.  This means jemalloc won't be hooked as
     the default allocator on OSX/iOS.

Makefile.in

@@ -49,7 +49,6 @@ cfgoutputs_out := @cfgoutputs_out@
 enable_autogen := @enable_autogen@
 enable_code_coverage := @enable_code_coverage@
 enable_prof := @enable_prof@
-enable_valgrind := @enable_valgrind@
 enable_zone_allocator := @enable_zone_allocator@
 MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
 DSO_LDFLAGS = @DSO_LDFLAGS@
@@ -104,9 +103,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
     $(srcroot)src/tsd.c \
     $(srcroot)src/util.c \
     $(srcroot)src/witness.c
-ifeq ($(enable_valgrind), 1)
-C_SRCS += $(srcroot)src/valgrind.c
-endif
 ifeq ($(enable_zone_allocator), 1)
 C_SRCS += $(srcroot)src/zone.c
 endif

README

@@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support.  jemalloc first came
 into use as the FreeBSD libc allocator in 2005, and since then it has found its
 way into numerous applications that rely on its predictable behavior.  In 2010
 jemalloc development efforts broadened to include developer support features
-such as heap profiling, Valgrind integration, and extensive monitoring/tuning
-hooks.  Modern jemalloc releases continue to be integrated back into FreeBSD,
-and therefore versatility remains critical.  Ongoing development efforts trend
-toward making jemalloc among the best allocators for a broad range of demanding
-applications, and eliminating/mitigating weaknesses that have practical
-repercussions for real world applications.
+such as heap profiling and extensive monitoring/tuning hooks.  Modern jemalloc
+releases continue to be integrated back into FreeBSD, and therefore versatility
+remains critical.  Ongoing development efforts trend toward making jemalloc
+among the best allocators for a broad range of demanding applications, and
+eliminating/mitigating weaknesses that have practical repercussions for real
+world applications.
 
 The COPYING file contains copyright and licensing information.

configure.ac

@@ -988,35 +988,6 @@ if test "x$enable_utrace" = "x1" ; then
 fi
 AC_SUBST([enable_utrace])
 
-dnl Support Valgrind by default.
-AC_ARG_ENABLE([valgrind],
-  [AS_HELP_STRING([--disable-valgrind], [Disable support for Valgrind])],
-[if test "x$enable_valgrind" = "xno" ; then
-  enable_valgrind="0"
-else
-  enable_valgrind="1"
-fi
-],
-[enable_valgrind="1"]
-)
-if test "x$enable_valgrind" = "x1" ; then
-  JE_COMPILABLE([valgrind], [
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-
-#if !defined(VALGRIND_RESIZEINPLACE_BLOCK)
-#  error "Incompatible Valgrind version"
-#endif
-], [], [je_cv_valgrind])
-  if test "x${je_cv_valgrind}" = "xno" ; then
-    enable_valgrind="0"
-  fi
-  if test "x$enable_valgrind" = "x1" ; then
-    AC_DEFINE([JEMALLOC_VALGRIND], [ ])
-  fi
-fi
-AC_SUBST([enable_valgrind])
-
 dnl Do not support the xmalloc option by default.
 AC_ARG_ENABLE([xmalloc],
   [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
@@ -1782,7 +1753,6 @@ AC_MSG_RESULT([prof-gcc           : ${enable_prof_gcc}])
 AC_MSG_RESULT([tcache             : ${enable_tcache}])
 AC_MSG_RESULT([fill               : ${enable_fill}])
 AC_MSG_RESULT([utrace             : ${enable_utrace}])
-AC_MSG_RESULT([valgrind           : ${enable_valgrind}])
 AC_MSG_RESULT([xmalloc            : ${enable_xmalloc}])
 AC_MSG_RESULT([munmap             : ${enable_munmap}])
 AC_MSG_RESULT([lazy_lock          : ${enable_lazy_lock}])

doc/jemalloc.xml.in

@@ -869,16 +869,6 @@ for (i = 0; i < nbins; i++) {
         build configuration.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="config.valgrind">
-        <term>
-          <mallctl>config.valgrind</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para><option>--enable-valgrind</option> was specified during
-        build configuration.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="config.xmalloc">
         <term>
           <mallctl>config.xmalloc</mallctl>
@@ -1046,9 +1036,8 @@ for (i = 0; i < nbins; i++) {
         "false", junk filling be disabled entirely.  This is intended for
         debugging and will impact performance negatively.  This option is
         "false" by default unless <option>--enable-debug</option> is specified
-        during configuration, in which case it is "true" by default unless
-        running inside <ulink
-        url="http://valgrind.org/">Valgrind</ulink>.</para></listitem>
+        during configuration, in which case it is "true" by
+        default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.quarantine">
@@ -1063,13 +1052,9 @@ for (i = 0; i < nbins; i++) {
         specified number of bytes of memory.  The quarantined memory is not
         freed until it is released from quarantine, though it is immediately
         junk-filled if the <link
-        linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
-        enabled.  This feature is of particular use in combination with <ulink
-        url="http://valgrind.org/">Valgrind</ulink>, which can detect attempts
-        to access quarantined objects.  This is intended for debugging and will
-        impact performance negatively.  The default quarantine size is 0 unless
-        running inside Valgrind, in which case the default is 16
-        MiB.</para></listitem>
+        linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is enabled.
+        This is intended for debugging and will impact performance negatively.
+        The default quarantine size is 0.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.redzone">
@@ -1083,12 +1068,8 @@ for (i = 0; i < nbins; i++) {
         allocations have redzones before and after them.  Furthermore, if the
         <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
         enabled, the redzones are checked for corruption during deallocation.
-        However, the primary intended purpose of this feature is to be used in
-        combination with <ulink url="http://valgrind.org/">Valgrind</ulink>,
-        which needs redzones in order to do effective buffer overflow/underflow
-        detection.  This option is intended for debugging and will impact
-        performance negatively.  This option is disabled by
-        default unless running inside Valgrind.</para></listitem>
+        This option is intended for debugging and will impact performance
+        negatively.  This option is disabled by default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.zero">
@@ -1155,9 +1136,7 @@ malloc_conf = "xmalloc:true";]]></programlisting>
         increased memory use.  See the <link
         linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
         option for related tuning information.  This option is enabled by
-        default unless running inside <ulink
-        url="http://valgrind.org/">Valgrind</ulink>, in which case it is
-        forcefully disabled.</para></listitem>
+        default.</para></listitem>
       </varlistentry>
 
       <varlistentry id="opt.lg_tcache_max">
@@ -2746,9 +2725,7 @@ MAPPED_LIBRARIES:
     <para>This implementation does not provide much detail about the problems
     it detects, because the performance impact for storing such information
-    would be prohibitive.  However, jemalloc does integrate with the most
-    excellent <ulink url="http://valgrind.org/">Valgrind</ulink> tool if the
-    <option>--enable-valgrind</option> configuration option is enabled.</para>
+    would be prohibitive.</para>
   </refsect1>
   <refsect1 id="diagnostic_messages">
     <title>DIAGNOSTIC MESSAGES</title>
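For context: the mallctl() read pattern these config.* entries document is unchanged by this commit; only the valgrind nodes disappear. A minimal sketch (not from this commit; assumes an unprefixed jemalloc 4.x build) of reading one of the surviving build-configuration flags:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	bool fill;
	size_t sz = sizeof(fill);

	/*
	 * "config.fill" survives this commit; a read of the removed
	 * "config.valgrind" node now fails with ENOENT instead.
	 */
	if (mallctl("config.fill", &fill, &sz, NULL, 0) == 0)
		printf("config.fill: %s\n", fill ? "true" : "false");
	return (0);
}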

include/jemalloc/internal/jemalloc_internal.h.in

@@ -113,13 +113,6 @@ static const bool config_utrace =
     false
 #endif
     ;
-static const bool config_valgrind =
-#ifdef JEMALLOC_VALGRIND
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_xmalloc =
 #ifdef JEMALLOC_XMALLOC
     true
@@ -361,7 +354,6 @@ typedef unsigned szind_t;
 #endif
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
@@ -393,7 +385,6 @@ typedef unsigned szind_t;
 #define JEMALLOC_H_STRUCTS
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
@@ -441,8 +432,6 @@ extern bool opt_xmalloc;
 extern bool opt_zero;
 extern unsigned opt_narenas;
 
-extern bool in_valgrind;
-
 /* Number of CPUs. */
 extern unsigned ncpus;
@@ -489,7 +478,6 @@ void jemalloc_postfork_parent(void);
 void jemalloc_postfork_child(void);
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"
@@ -521,7 +509,6 @@ void jemalloc_postfork_child(void);
 #define JEMALLOC_H_INLINES
 
 #include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/valgrind.h"
 #include "jemalloc/internal/util.h"
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/prng.h"

include/jemalloc/jemalloc_defs.h.in

@@ -148,9 +148,6 @@
 /* Support utrace(2)-based tracing. */
 #undef JEMALLOC_UTRACE
 
-/* Support Valgrind. */
-#undef JEMALLOC_VALGRIND
-
 /* Support optional abort() on OOM. */
 #undef JEMALLOC_XMALLOC
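The #undef lines above are placeholders that configure rewrites to #define when a feature is enabled; the internal headers then fold each one into a compile-time constant so the optimizer can drop dead branches, exactly as the removed config_valgrind definition did. A minimal sketch of that pattern (JEMALLOC_FEATURE and config_feature are hypothetical names):

#include <stdbool.h>

static const bool config_feature =
#ifdef JEMALLOC_FEATURE
    true
#else
    false
#endif
    ;

void
feature_dependent_work(void)
{

	if (config_feature) {
		/* Compiled out entirely when the feature is disabled. */
	}
}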

include/jemalloc/internal/private_symbols.txt

@@ -296,7 +296,6 @@ iallocztm
 iarena_cleanup
 idalloc
 idalloctm
-in_valgrind
 index2size
 index2size_compute
 index2size_lookup
@@ -591,10 +590,6 @@ tsdn_fetch
 tsdn_null
 tsdn_tsd
 u2rz
-valgrind_freelike_block
-valgrind_make_mem_defined
-valgrind_make_mem_noaccess
-valgrind_make_mem_undefined
 witness_assert_lockless
 witness_assert_not_owner
 witness_assert_owner

include/jemalloc/internal/quarantine.h

@@ -4,9 +4,6 @@
 typedef struct quarantine_obj_s quarantine_obj_t;
 typedef struct quarantine_s quarantine_t;
 
-/* Default per thread quarantine size if valgrind is enabled. */
-#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24)
-
 #endif /* JEMALLOC_H_TYPES */
 /******************************************************************************/
 #ifdef JEMALLOC_H_STRUCTS

include/jemalloc/internal/valgrind.h

@@ -1,114 +0,0 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
-
-#ifdef JEMALLOC_VALGRIND
-#include <valgrind/valgrind.h>
-
-/*
- * The size that is reported to Valgrind must be consistent through a chain of
- * malloc..realloc..realloc calls.  Request size isn't recorded anywhere in
- * jemalloc, so it is critical that all callers of these macros provide usize
- * rather than request size.  As a result, buffer overflow detection is
- * technically weakened for the standard API, though it is generally accepted
- * practice to consider any extra bytes reported by malloc_usable_size() as
- * usable space.
- */
-#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {	\
-	if (unlikely(in_valgrind))				\
-		valgrind_make_mem_noaccess(ptr, usize);		\
-} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {	\
-	if (unlikely(in_valgrind))				\
-		valgrind_make_mem_undefined(ptr, usize);	\
-} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {	\
-	if (unlikely(in_valgrind))				\
-		valgrind_make_mem_defined(ptr, usize);		\
-} while (0)
-/*
- * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro
- * calls must be embedded in macros rather than in functions so that when
- * Valgrind reports errors, there are no extra stack frames in the backtraces.
- */
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {	\
-	if (unlikely(in_valgrind && cond)) {				\
-		VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr),	\
-		    zero);						\
-	}								\
-} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
-    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
-    zero) do {								\
-	if (unlikely(in_valgrind)) {					\
-		size_t rzsize = p2rz(tsdn, ptr);			\
-									\
-		if (!maybe_moved || ptr == old_ptr) {			\
-			VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize,	\
-			    usize, rzsize);				\
-			if (zero && old_usize < usize) {		\
-				valgrind_make_mem_defined(		\
-				    (void *)((uintptr_t)ptr +		\
-				    old_usize), usize - old_usize);	\
-			}						\
-		} else {						\
-			if (!old_ptr_maybe_null || old_ptr != NULL) {	\
-				valgrind_freelike_block(old_ptr,	\
-				    old_rzsize);			\
-			}						\
-			if (!ptr_maybe_null || ptr != NULL) {		\
-				size_t copy_size = (old_usize < usize)	\
-				    ? old_usize : usize;		\
-				size_t tail_size = usize - copy_size;	\
-				VALGRIND_MALLOCLIKE_BLOCK(ptr, usize,	\
-				    rzsize, false);			\
-				if (copy_size > 0) {			\
-					valgrind_make_mem_defined(ptr,	\
-					    copy_size);			\
-				}					\
-				if (zero && tail_size > 0) {		\
-					valgrind_make_mem_defined(	\
-					    (void *)((uintptr_t)ptr +	\
-					    copy_size), tail_size);	\
-				}					\
-			}						\
-		}							\
-	}								\
-} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {		\
-	if (unlikely(in_valgrind))				\
-		valgrind_freelike_block(ptr, rzsize);		\
-} while (0)
-#else
-#define RUNNING_ON_VALGRIND ((unsigned)0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize,	\
-    ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null,	\
-    zero) do {} while (0)
-#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
-#endif
-
-#endif /* JEMALLOC_H_TYPES */
-/******************************************************************************/
-#ifdef JEMALLOC_H_STRUCTS
-
-#endif /* JEMALLOC_H_STRUCTS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_EXTERNS
-
-#ifdef JEMALLOC_VALGRIND
-void	valgrind_make_mem_noaccess(void *ptr, size_t usize);
-void	valgrind_make_mem_undefined(void *ptr, size_t usize);
-void	valgrind_make_mem_defined(void *ptr, size_t usize);
-void	valgrind_freelike_block(void *ptr, size_t usize);
-#endif
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#endif /* JEMALLOC_H_INLINES */
-/******************************************************************************/
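The client requests this removed header wrapped remain available directly from Valgrind's own headers for allocators that still want memcheck integration. A minimal sketch under that assumption (a toy bump allocator, not jemalloc code; redzone size 0 for brevity):

#include <stddef.h>
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>

static char pool[4096];
static size_t pool_used;

void *
pool_alloc(size_t size)
{
	void *p;

	if (pool_used + size > sizeof(pool))
		return (NULL);
	p = &pool[pool_used];
	pool_used += size;
	/* Ask memcheck to track this range like a malloc()ed block. */
	VALGRIND_MALLOCLIKE_BLOCK(p, size, 0, 0);
	return (p);
}

void
pool_free(void *p)
{

	/* Subsequent accesses to the block become errors, as after free(). */
	VALGRIND_FREELIKE_BLOCK(p, 0);
}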

msvc/projects/vc2015/jemalloc/jemalloc.vcxproj

@@ -74,7 +74,6 @@
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\ticker.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\tsd.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h" />
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_defs.h" />
     <ClInclude Include="..\..\..\..\include\jemalloc\jemalloc_macros.h" />
@@ -395,4 +394,4 @@
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
   </ImportGroup>
-</Project>
\ No newline at end of file
+</Project>

msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters

@@ -161,9 +161,6 @@
     <ClInclude Include="..\..\..\..\include\jemalloc\internal\util.h">
       <Filter>Header Files\internal</Filter>
     </ClInclude>
-    <ClInclude Include="..\..\..\..\include\jemalloc\internal\valgrind.h">
-      <Filter>Header Files\internal</Filter>
-    </ClInclude>
     <ClInclude Include="..\..\..\..\include\msvc_compat\strings.h">
       <Filter>Header Files\msvc_compat</Filter>
     </ClInclude>
@@ -257,4 +254,4 @@
       <Filter>Source Files</Filter>
     </ClCompile>
   </ItemGroup>
-</Project>
\ No newline at end of file
+</Project>

src/arena.c

@@ -350,27 +350,16 @@ JEMALLOC_INLINE_C void
 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
 {
 
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
 	    (npages << LG_PAGE));
 }
 
-JEMALLOC_INLINE_C void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
-
-	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
-	    << LG_PAGE)), PAGE);
-}
-
 JEMALLOC_INLINE_C void
 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
 	size_t i;
 	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
 
-	arena_run_page_mark_zeroed(chunk, run_ind);
 	for (i = 0; i < PAGE / sizeof(size_t); i++)
 		assert(p[i] == 0);
 }
@@ -471,12 +460,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 	}
 
 	if (zero) {
-		if (flag_decommitted != 0) {
-			/* The run is untouched, and therefore zeroed. */
-			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
-			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
-			    (need_pages << LG_PAGE));
-		} else if (flag_dirty != 0) {
+		if (flag_decommitted != 0)
+			; /* The run is untouched, and therefore zeroed. */
+		else if (flag_dirty != 0) {
 			/* The run is dirty, so all pages must be zeroed. */
 			arena_run_zero(chunk, run_ind, need_pages);
 		} else {
@@ -492,15 +478,9 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 				else if (config_debug) {
 					arena_run_page_validate_zeroed(chunk,
 					    run_ind+i);
-				} else {
-					arena_run_page_mark_zeroed(chunk,
-					    run_ind+i);
 				}
 			}
 		}
-	} else {
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
 	}
 
 	/*
@@ -564,8 +544,6 @@ arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
 		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
 			arena_run_page_validate_zeroed(chunk, run_ind+i);
 	}
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
-	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
 
 	return (false);
 }
@@ -700,19 +678,9 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
 	 * the chunk is not zeroed.
 	 */
 	if (!zero) {
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
-		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
-		    chunk_npages-1) -
-		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
 		for (i = map_bias+1; i < chunk_npages-1; i++)
 			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
 	} else {
-		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
-		    *)arena_bitselm_get_const(chunk, map_bias+1),
-		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
-		    chunk_npages-1) -
-		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
 		if (config_debug) {
 			for (i = map_bias+1; i < chunk_npages-1; i++) {
 				assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -2571,13 +2539,11 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
 			} else if (unlikely(opt_zero))
 				memset(ret, 0, usize);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 	} else {
 		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 		memset(ret, 0, usize);
 	}
@@ -3311,7 +3277,6 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 		 */
 		copysize = (usize < oldsize) ? usize : oldsize;
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
 		memcpy(ret, ptr, copysize);
 		isqalloc(tsd, ptr, oldsize, tcache, true);
 	} else {

src/base.c

@@ -24,7 +24,6 @@ base_node_try_alloc(tsdn_t *tsdn)
 		return (NULL);
 	node = base_nodes;
 	base_nodes = *(extent_node_t **)node;
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	return (node);
 }
@@ -34,7 +33,6 @@ base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
 	malloc_mutex_assert_owner(tsdn, &base_mtx);
 
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
 	*(extent_node_t **)node = base_nodes;
 	base_nodes = node;
 }
@@ -123,7 +121,6 @@ base_alloc(tsdn_t *tsdn, size_t size)
 		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
 		    PAGE_CEILING((uintptr_t)ret);
 	}
-	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
 label_return:
 	malloc_mutex_unlock(tsdn, &base_mtx);
 	return (ret);

src/chunk.c

@@ -316,7 +316,6 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		size_t i;
 		size_t *p = (size_t *)(uintptr_t)ret;
 
-		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
 		for (i = 0; i < size / sizeof(size_t); i++)
 			assert(p[i] == 0);
 	}
@@ -376,8 +375,6 @@ chunk_alloc_base(size_t size)
 	ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
 	if (ret == NULL)
 		return (NULL);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
@@ -401,8 +398,6 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	if (ret == NULL)
 		return (NULL);
 	assert(commit);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	return (ret);
 }
@@ -434,8 +429,6 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
 	    commit, arena->dss_prec);
 	if (ret == NULL)
 		return (NULL);
-	if (config_valgrind)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
 	return (ret);
 }
@@ -478,8 +471,6 @@ chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 		return (NULL);
 	}
 
-	if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
 	return (ret);
 }
@@ -494,7 +485,6 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	assert(!cache || !zeroed);
 	unzeroed = cache || !zeroed;
-	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
 
 	malloc_mutex_lock(tsdn, &arena->chunks_mtx);
 	chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);

src/chunk_dss.c

@@ -138,11 +138,8 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 					    &chunk_hooks, cpad, cpad_size,
 					    false, true);
 				}
-				if (*zero) {
-					JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-					    ret, size);
+				if (*zero)
 					memset(ret, 0, size);
-				}
 				if (!*commit)
 					*commit = pages_decommit(ret, size);
 				return (ret);

src/ctl.c

@@ -86,7 +86,6 @@ CTL_PROTO(config_stats)
 CTL_PROTO(config_tcache)
 CTL_PROTO(config_tls)
 CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
 CTL_PROTO(config_xmalloc)
 CTL_PROTO(opt_abort)
 CTL_PROTO(opt_dss)
@@ -260,7 +259,6 @@ static const ctl_named_node_t config_node[] = {
 	{NAME("tcache"),	CTL(config_tcache)},
 	{NAME("tls"),		CTL(config_tls)},
 	{NAME("utrace"),	CTL(config_utrace)},
-	{NAME("valgrind"),	CTL(config_valgrind)},
 	{NAME("xmalloc"),	CTL(config_xmalloc)}
 };
@@ -1270,7 +1268,6 @@ CTL_RO_CONFIG_GEN(config_stats, bool)
 CTL_RO_CONFIG_GEN(config_tcache, bool)
 CTL_RO_CONFIG_GEN(config_tls, bool)
 CTL_RO_CONFIG_GEN(config_utrace, bool)
-CTL_RO_CONFIG_GEN(config_valgrind, bool)
 CTL_RO_CONFIG_GEN(config_xmalloc, bool)
 
 /******************************************************************************/
@@ -1622,8 +1619,7 @@ arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
 	READONLY();
 	WRITEONLY();
 
-	if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
-	    unlikely(opt_quarantine))) {
+	if (config_fill && unlikely(opt_quarantine)) {
 		ret = EFAULT;
 		goto label_return;
 	}
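The arena_i_reset_ctl() change narrows the failure condition: arena.<i>.reset now fails with EFAULT only when quarantine is active, rather than also whenever Valgrind was detected. A hedged caller-side sketch (jemalloc 4.x mallctl names; reset_arena() is a hypothetical helper):

#include <jemalloc/jemalloc.h>

int
reset_arena(unsigned arena_ind)
{
	size_t mib[3];
	size_t miblen = sizeof(mib) / sizeof(size_t);

	if (mallctlnametomib("arena.0.reset", mib, &miblen) != 0)
		return (-1);
	mib[1] = (size_t)arena_ind;	/* Select the target arena. */
	/* Returns EFAULT when opt.quarantine is in use. */
	return (mallctlbymib(mib, miblen, NULL, NULL, NULL, 0));
}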

src/jemalloc.c

@@ -42,9 +42,6 @@ bool	opt_xmalloc = false;
 bool	opt_zero = false;
 unsigned	opt_narenas = 0;
 
-/* Initialized to true if the process is running inside Valgrind. */
-bool	in_valgrind;
-
 unsigned	ncpus;
 
 /* Protects arenas initialization. */
@@ -80,8 +77,6 @@ enum {
 	flag_opt_quarantine	= (1U << 2),
 	flag_opt_zero		= (1U << 3),
 	flag_opt_utrace		= (1U << 4),
-	flag_in_valgrind	= (1U << 5),
-	flag_opt_xmalloc	= (1U << 6)
+	flag_opt_xmalloc	= (1U << 5)
 };
 
 static uint8_t	malloc_slow_flags;
@@ -894,9 +890,6 @@ malloc_slow_flag_init(void)
 	    | (opt_utrace ? flag_opt_utrace : 0)
 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
 
-	if (config_valgrind)
-		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
-
 	malloc_slow = (malloc_slow_flags != 0);
 }
@@ -908,24 +901,6 @@ malloc_conf_init(void)
 	const char *opts, *k, *v;
 	size_t klen, vlen;
 
-	/*
-	 * Automatically configure valgrind before processing options.  The
-	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
-	 */
-	if (config_valgrind) {
-		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
-		if (config_fill && unlikely(in_valgrind)) {
-			opt_junk = "false";
-			opt_junk_alloc = false;
-			opt_junk_free = false;
-			assert(!opt_zero);
-			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
-			opt_redzone = true;
-		}
-		if (config_tcache && unlikely(in_valgrind))
-			opt_tcache = false;
-	}
-
 	for (i = 0; i < 4; i++) {
 		/* Get runtime configuration. */
 		switch (i) {
@@ -1183,19 +1158,7 @@ malloc_conf_init(void)
 			CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
 			}
 			if (config_tcache) {
-				CONF_HANDLE_BOOL(opt_tcache, "tcache",
-				    !config_valgrind || !in_valgrind)
-				if (CONF_MATCH("tcache")) {
-					assert(config_valgrind && in_valgrind);
-					if (opt_tcache) {
-						opt_tcache = false;
-						malloc_conf_error(
-						"tcache cannot be enabled "
-						"while running inside Valgrind",
-						k, klen, v, vlen);
-					}
-					continue;
-				}
+				CONF_HANDLE_BOOL(opt_tcache, "tcache", true)
 				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
 				    "lg_tcache_max", -1,
 				    (sizeof(size_t) << 3) - 1)
@@ -1508,8 +1471,7 @@ ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
 	if (unlikely(ind >= NSIZES))
 		return (NULL);
 
-	if (config_stats || (config_prof && opt_prof) || (slow_path &&
-	    config_valgrind && unlikely(in_valgrind))) {
+	if (config_stats || (config_prof && opt_prof)) {
 		*usize = index2size(ind);
 		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
 	}
@@ -1562,7 +1524,6 @@ je_malloc(size_t size)
 		ret = ialloc_body(size, false, &tsdn, &usize, true);
 		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
 		UTRACE(0, size, ret);
-		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
 	}
 
 	return (ret);
@@ -1664,8 +1625,6 @@ label_return:
 		*tsd_thread_allocatedp_get(tsd) += usize;
 	}
 	UTRACE(0, size, result);
-	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
-	    false);
 	witness_assert_lockless(tsd_tsdn(tsd));
 	return (ret);
 label_oom:
@@ -1684,11 +1643,8 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
 JEMALLOC_ATTR(nonnull(1))
 je_posix_memalign(void **memptr, size_t alignment, size_t size)
 {
-	int ret;
-
-	ret = imemalign(memptr, alignment, size, sizeof(void *));
-
-	return (ret);
+
+	return (imemalign(memptr, alignment, size, sizeof(void *)));
 }
 
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1703,7 +1659,6 @@ je_aligned_alloc(size_t alignment, size_t size)
 		ret = NULL;
 		set_errno(err);
 	}
-
 	return (ret);
 }
@@ -1739,7 +1694,6 @@ je_calloc(size_t num, size_t size)
 		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
 		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
 		UTRACE(0, num_size, ret);
-		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
 	}
 
 	return (ret);
@@ -1792,7 +1746,6 @@ JEMALLOC_INLINE_C void
 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 {
 	size_t usize;
-	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	witness_assert_lockless(tsd_tsdn(tsd));
@@ -1802,25 +1755,20 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
 	if (config_prof && opt_prof) {
 		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
 		prof_free(tsd, ptr, usize);
-	} else if (config_stats || config_valgrind)
+	} else if (config_stats)
 		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
 	if (config_stats)
 		*tsd_thread_deallocatedp_get(tsd) += usize;
 
 	if (likely(!slow_path))
 		iqalloc(tsd, ptr, tcache, false);
-	else {
-		if (config_valgrind && unlikely(in_valgrind))
-			rzsize = p2rz(tsd_tsdn(tsd), ptr);
+	else
 		iqalloc(tsd, ptr, tcache, true);
-		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-	}
 }
 
 JEMALLOC_INLINE_C void
 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
 {
-	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	witness_assert_lockless(tsd_tsdn(tsd));
@@ -1831,10 +1779,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
 		prof_free(tsd, ptr, usize);
 	if (config_stats)
 		*tsd_thread_deallocatedp_get(tsd) += usize;
-	if (config_valgrind && unlikely(in_valgrind))
-		rzsize = p2rz(tsd_tsdn(tsd), ptr);
 	isqalloc(tsd, ptr, usize, tcache, slow_path);
-	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
 }
 
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1846,7 +1791,6 @@ je_realloc(void *ptr, size_t size)
 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t old_usize = 0;
-	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
 	if (unlikely(size == 0)) {
 		if (ptr != NULL) {
@@ -1871,18 +1815,13 @@ je_realloc(void *ptr, size_t size)
 		witness_assert_lockless(tsd_tsdn(tsd));
 
 		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-		if (config_valgrind && unlikely(in_valgrind)) {
-			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
-			    u2rz(old_usize);
-		}
 
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
 			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
 		} else {
-			if (config_stats || (config_valgrind &&
-			    unlikely(in_valgrind)))
+			if (config_stats)
 				usize = s2u(size);
 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
 		}
@@ -1913,8 +1852,6 @@ je_realloc(void *ptr, size_t size)
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, ret);
-	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
-	    old_rzsize, true, false);
 	witness_assert_lockless(tsdn);
 	return (ret);
 }
@@ -2143,8 +2080,7 @@ imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
 		szind_t ind = size2index(size);
 		if (unlikely(ind >= NSIZES))
 			return (NULL);
-		if (config_stats || (config_prof && opt_prof) || (slow_path &&
-		    config_valgrind && unlikely(in_valgrind))) {
+		if (config_stats || (config_prof && opt_prof)) {
 			*usize = index2size(ind);
 			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
 		}
@@ -2181,8 +2117,6 @@ je_mallocx(size_t size, int flags)
 		p = imallocx_body(size, flags, &tsdn, &usize, true);
 		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
 		UTRACE(0, size, p);
-		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
-		    MALLOCX_ZERO_GET(flags));
 	}
 
 	return (p);
@@ -2261,7 +2195,6 @@ je_rallocx(void *ptr, size_t size, int flags)
 	tsd_t *tsd;
 	size_t usize;
 	size_t old_usize;
-	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
 	bool zero = flags & MALLOCX_ZERO;
 	arena_t *arena;
@@ -2291,8 +2224,6 @@ je_rallocx(void *ptr, size_t size, int flags)
 		tcache = tcache_get(tsd, true);
 
 	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
-	if (config_valgrind && unlikely(in_valgrind))
-		old_rzsize = u2rz(old_usize);
 
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
@@ -2307,7 +2238,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 		    tcache, arena);
 		if (unlikely(p == NULL))
 			goto label_oom;
-		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+		if (config_stats)
 			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
 	}
 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2317,8 +2248,6 @@ je_rallocx(void *ptr, size_t size, int flags)
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
 	UTRACE(ptr, size, p);
-	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
 	witness_assert_lockless(tsd_tsdn(tsd));
 	return (p);
 label_oom:
@@ -2413,7 +2342,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 {
 	tsd_t *tsd;
 	size_t usize, old_usize;
-	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
 	bool zero = flags & MALLOCX_ZERO;
@@ -2443,9 +2371,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	if (unlikely(HUGE_MAXCLASS - size < extra))
 		extra = HUGE_MAXCLASS - size;
 
-	if (config_valgrind && unlikely(in_valgrind))
-		old_rzsize = u2rz(old_usize);
-
 	if (config_prof && opt_prof) {
 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
 		    alignment, zero);
@@ -2460,8 +2385,6 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		*tsd_thread_allocatedp_get(tsd) += usize;
 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
 	}
-	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
-	    old_usize, old_rzsize, false, zero);
 label_not_resized:
 	UTRACE(ptr, size, ptr);
 	witness_assert_lockless(tsd_tsdn(tsd));
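With the auto-configuration block gone from malloc_conf_init(), the settings Valgrind used to force (junk filling off, a 16 MiB quarantine, redzones on, tcache off) now only take effect when set explicitly. A hedged sketch of the application-embedded override the manual documents (assumes an unprefixed build; 16777216 is 1 << 24, the old JEMALLOC_VALGRIND_QUARANTINE_DEFAULT):

#include <jemalloc/jemalloc.h>

/* Read once at first allocation; mirrors the removed Valgrind defaults. */
const char *malloc_conf =
    "junk:false,quarantine:16777216,redzone:true,tcache:false";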

src/quarantine.c

@@ -150,12 +150,7 @@ quarantine(tsd_t *tsd, void *ptr)
 	quarantine->curbytes += usize;
 	quarantine->curobjs++;
 	if (config_fill && unlikely(opt_junk_free)) {
-		/*
-		 * Only do redzone validation if Valgrind isn't in
-		 * operation.
-		 */
-		if ((!config_valgrind || likely(!in_valgrind))
-		    && usize <= SMALL_MAXCLASS)
+		if (usize <= SMALL_MAXCLASS)
 			arena_quarantine_junk_small(ptr, usize);
 		else
 			memset(ptr, JEMALLOC_FREE_JUNK, usize);

src/stats.c

@@ -517,7 +517,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	OPT_WRITE_BOOL(redzone)
 	OPT_WRITE_BOOL(zero)
 	OPT_WRITE_BOOL(utrace)
-	OPT_WRITE_BOOL(valgrind)
 	OPT_WRITE_BOOL(xmalloc)
 	OPT_WRITE_BOOL(tcache)
 	OPT_WRITE_SSIZE_T(lg_tcache_max)

src/valgrind.c

@@ -1,34 +0,0 @@
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifndef JEMALLOC_VALGRIND
-#  error "This source file is for Valgrind integration."
-#endif
-
-#include <valgrind/memcheck.h>
-
-void
-valgrind_make_mem_noaccess(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
-}
-
-void
-valgrind_make_mem_undefined(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
-}
-
-void
-valgrind_make_mem_defined(void *ptr, size_t usize)
-{
-
-	VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
-}
-
-void
-valgrind_freelike_block(void *ptr, size_t usize)
-{
-
-	VALGRIND_FREELIKE_BLOCK(ptr, usize);
-}

test/unit/arena_reset.c

@@ -88,8 +88,7 @@ TEST_BEGIN(test_arena_reset)
 	size_t mib[3];
 	tsdn_t *tsdn;
 
-	test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill
-	    && unlikely(opt_quarantine)));
+	test_skip_if(config_fill && unlikely(opt_quarantine));
 
 	sz = sizeof(unsigned);
 	assert_d_eq(mallctl("arenas.extend", &arena_ind, &sz, NULL, 0), 0,

test/unit/mallctl.c

@@ -139,7 +139,6 @@ TEST_BEGIN(test_mallctl_config)
 	TEST_MALLCTL_CONFIG(tcache, bool);
 	TEST_MALLCTL_CONFIG(tls, bool);
 	TEST_MALLCTL_CONFIG(utrace, bool);
-	TEST_MALLCTL_CONFIG(valgrind, bool);
 	TEST_MALLCTL_CONFIG(xmalloc, bool);
 
 #undef TEST_MALLCTL_CONFIG