Merge branch 'dev'

commit 9898051fd1

ChangeLog | 57
@@ -4,6 +4,63 @@ brevity. Much more detail can be found in the git revision history:
 
     https://github.com/jemalloc/jemalloc
 
+* 4.0.1 (September 15, 2015)
+
+  This is a bugfix release that is somewhat high risk due to the amount of
+  refactoring required to address deep xallocx() problems.  As a side effect of
+  these fixes, xallocx() now tries harder to partially fulfill requests for
+  optional extra space.  Note that a couple of minor heap profiling
+  optimizations are included, but these are better thought of as performance
+  fixes that were integral to discovering most of the other bugs.
+
+  Optimizations:
+  - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
+    fast path when heap profiling is enabled.  Additionally, split a special
+    case out into arena_prof_tctx_reset(), which also avoids chunk metadata
+    reads.
+  - Optimize irallocx_prof() to optimistically update the sampler state.  The
+    prior implementation appears to have been a holdover from when
+    rallocx()/xallocx() functionality was combined as rallocm().
+
+  Bug fixes:
+  - Fix TLS configuration such that it is enabled by default for platforms on
+    which it works correctly.
+  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+    allocation/deallocation within the application's thread-specific data
+    cleanup functions even after arenas_cache is torn down.
+  - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
+  - Fix chunk purge hook calls for in-place huge shrinking reallocation to
+    specify the old chunk size rather than the new chunk size.  This bug caused
+    no correctness issues for the default chunk purge function, but was
+    visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
+  - Fix heap profiling bugs:
+    + Fix heap profiling to distinguish among otherwise identical sample sites
+      with interposed resets (triggered via the "prof.reset" mallctl).  This bug
+      could cause data structure corruption that would most likely result in a
+      segfault.
+    + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+    + Make one call to prof_active_get_unlocked() per allocation event, and use
+      the result throughout the relevant functions that handle an allocation
+      event.  Also add a missing check in prof_realloc().  These fixes protect
+      allocation events against concurrent prof_active changes.
+    + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
+      in the correct order.
+    + Fix prof_realloc() to call prof_free_sampled_object() after calling
+      prof_malloc_sample_object().  Prior to this fix, if tctx and old_tctx were
+      the same, the tctx could have been prematurely destroyed.
+  - Fix portability bugs:
+    + Don't bitshift by negative amounts when encoding/decoding run sizes in
+      chunk header maps.  This affected systems with page sizes greater than 8
+      KiB.
+    + Rename index_t to szind_t to avoid an existing type on Solaris.
+    + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
+      match glibc and avoid compilation errors when including both
+      jemalloc/jemalloc.h and malloc.h in C++ code.
+    + Don't assume that /bin/sh is appropriate when running size_classes.sh
+      during configuration.
+    + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+    + Link tests to librt if it contains clock_gettime(2).
+
 * 4.0.0 (August 17, 2015)
 
   This version contains many speed and space optimizations, both minor and
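The xallocx() behavior described above is easiest to see from the caller's side. The stand-alone sketch below is not part of this commit; it only assumes jemalloc's public non-standard API (mallocx/xallocx/dallocx from jemalloc/jemalloc.h) and shows how a partially fulfilled request for optional extra space is reported through the return value.

/* Sketch only: how xallocx()'s optional "extra" space is requested and how a
 * partial fulfillment shows up to the caller. */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    size_t size = 4096, extra = 4096;
    void *p = mallocx(size, 0);

    if (p == NULL)
        return (1);
    /* Try to grow in place; the return value is the new usable size. */
    size_t usable = xallocx(p, size, extra, 0);
    printf("requested %zu(+%zu), got %zu usable bytes\n", size, extra, usable);
    dallocx(p, 0);
    return (0);
}

Because xallocx() never moves the allocation, the returned usable size can land anywhere between size and size + extra; that range is exactly the partial fulfillment the release notes refer to.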
Makefile.in | 17
@@ -28,6 +28,7 @@ CFLAGS := @CFLAGS@
 LDFLAGS := @LDFLAGS@
 EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
 LIBS := @LIBS@
+TESTLIBS := @TESTLIBS@
 RPATH_EXTRA := @RPATH_EXTRA@
 SO := @so@
 IMPORTLIB := @importlib@
@@ -265,15 +266,15 @@ $(STATIC_LIBS):
 
 $(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(TESTS_UNIT_LINK_OBJS) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
 	@mkdir -p $(@D)
-	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
 
 $(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
 	@mkdir -p $(@D)
-	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(EXTRA_LDFLAGS)
+	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(filter -lpthread,$(LIBS))) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
 
 $(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
 	@mkdir -p $(@D)
-	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+	$(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(TESTLIBS) $(EXTRA_LDFLAGS)
 
 build_lib_shared: $(DSOS)
 build_lib_static: $(STATIC_LIBS)
@@ -343,9 +344,9 @@ check_unit_dir:
 	@mkdir -p $(objroot)test/unit
 check_integration_dir:
 	@mkdir -p $(objroot)test/integration
-check_stress_dir:
+stress_dir:
 	@mkdir -p $(objroot)test/stress
-check_dir: check_unit_dir check_integration_dir check_stress_dir
+check_dir: check_unit_dir check_integration_dir
 
 check_unit: tests_unit check_unit_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
@@ -355,10 +356,10 @@ ifeq ($(enable_prof), 1)
 endif
 check_integration: tests_integration check_integration_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
-check_stress: tests_stress check_stress_dir
+stress: tests_stress stress_dir
 	$(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
 check: tests check_dir check_integration_prof
-	$(SHELL) $(objroot)test/test.sh $(TESTS:$(srcroot)%.c=$(objroot)%)
+	$(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%)
 
 ifeq ($(enable_code_coverage), 1)
 coverage_unit: check_unit
@@ -372,7 +373,7 @@ coverage_integration: check_integration
 	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src integration $(C_TESTLIB_INTEGRATION_OBJS)
 	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/integration integration $(TESTS_INTEGRATION_OBJS)
 
-coverage_stress: check_stress
+coverage_stress: stress
 	$(SHELL) $(srcroot)coverage.sh $(srcroot)src pic $(C_PIC_OBJS)
 	$(SHELL) $(srcroot)coverage.sh $(srcroot)src jet $(C_JET_OBJS)
 	$(SHELL) $(srcroot)coverage.sh $(srcroot)test/src stress $(C_TESTLIB_STRESS_OBJS)
configure.ac | 26
@@ -1190,6 +1190,14 @@ fi
 
 CPPFLAGS="$CPPFLAGS -D_REENTRANT"
 
+dnl Check whether clock_gettime(2) is in libc or librt.  This function is only
+dnl used in test code, so save the result to TESTLIBS to avoid polluting LIBS.
+SAVED_LIBS="${LIBS}"
+LIBS=
+AC_SEARCH_LIBS([clock_gettime], [rt], [TESTLIBS="${LIBS}"])
+AC_SUBST([TESTLIBS])
+LIBS="${SAVED_LIBS}"
+
 dnl Check if the GNU-specific secure_getenv function exists.
 AC_CHECK_FUNC([secure_getenv],
   [have_secure_getenv="1"],
@@ -1272,13 +1280,16 @@ fi
 ,
   enable_tls=""
 )
-if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then
+if test "x${enable_tls}" = "x" ; then
+  if test "x${force_tls}" = "x1" ; then
   AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
   enable_tls="1"
-fi
-if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then
+  elif test "x${force_tls}" = "x0" ; then
   AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
   enable_tls="0"
+  else
+    enable_tls="1"
+  fi
 fi
 if test "x${enable_tls}" = "x1" ; then
   AC_MSG_CHECKING([for TLS])
@@ -1298,9 +1309,12 @@ else
 fi
 AC_SUBST([enable_tls])
 if test "x${enable_tls}" = "x1" ; then
+  if test "x${force_tls}" = "x0" ; then
+    AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
+  fi
   AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
 elif test "x${force_tls}" = "x1" ; then
-  AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function])
+  AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
 fi
 
 dnl ============================================================================
@@ -1615,8 +1629,9 @@ AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
 ])
 AC_CONFIG_COMMANDS([include/jemalloc/internal/size_classes.h], [
   mkdir -p "${objroot}include/jemalloc/internal"
-  "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
+  "${SHELL}" "${srcdir}/include/jemalloc/internal/size_classes.sh" "${LG_QUANTA}" ${LG_TINY_MIN} "${LG_PAGE_SIZES}" ${LG_SIZE_CLASS_GROUP} > "${objroot}include/jemalloc/internal/size_classes.h"
 ], [
+  SHELL="${SHELL}"
   srcdir="${srcdir}"
   objroot="${objroot}"
   LG_QUANTA="${LG_QUANTA}"
@@ -1687,6 +1702,7 @@ AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
 AC_MSG_RESULT([LDFLAGS       : ${LDFLAGS}])
 AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
 AC_MSG_RESULT([LIBS          : ${LIBS}])
+AC_MSG_RESULT([TESTLIBS      : ${TESTLIBS}])
 AC_MSG_RESULT([RPATH_EXTRA   : ${RPATH_EXTRA}])
 AC_MSG_RESULT([])
 AC_MSG_RESULT([XSLTPROC      : ${XSLTPROC}])
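For context, the TESTLIBS machinery exists because the test harness calls clock_gettime(2), which older glibc ships in librt rather than libc, so test binaries may need -lrt while the library itself should not. A minimal sketch of that kind of call follows; the helper name is illustrative and not taken from this commit.

/* Illustrative only: the kind of call that forces -lrt on older glibc. */
#include <time.h>

/* Returns a monotonic timestamp in nanoseconds, or -1 on failure
 * (hypothetical helper, not jemalloc code). */
static long long
monotonic_ns(void)
{
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
        return (-1);
    return ((long long)ts.tv_sec * 1000000000LL + ts.tv_nsec);
}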
@@ -39,7 +39,7 @@ typedef struct arena_s arena_t;
 #ifdef JEMALLOC_ARENA_STRUCTS_A
 struct arena_run_s {
     /* Index of bin this run is associated with. */
-    index_t binind;
+    szind_t binind;
 
     /* Number of free regions in run. */
     unsigned nfree;
@@ -424,7 +424,7 @@ extern arena_bin_info_t arena_bin_info[NBINS];
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t map_misc_offset;
 extern size_t arena_maxrun; /* Max run size for arenas. */
-extern size_t arena_maxclass; /* Max size class for arenas. */
+extern size_t large_maxclass; /* Max large size class. */
 extern unsigned nlclasses; /* Number of large size classes. */
 extern unsigned nhclasses; /* Number of huge size classes. */
 
@@ -448,7 +448,7 @@ bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
 void arena_maybe_purge(arena_t *arena);
 void arena_purge_all(arena_t *arena);
 void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    index_t binind, uint64_t prof_accumbytes);
+    szind_t binind, uint64_t prof_accumbytes);
 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
@@ -488,7 +488,7 @@ extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache);
+    size_t size, size_t alignment, bool zero, tcache_t *tcache);
 dss_prec_t arena_dss_prec_get(arena_t *arena);
 bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
 ssize_t arena_lg_dirty_mult_default_get(void);
@@ -519,17 +519,19 @@ arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
 size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbitsp_read(size_t *mapbitsp);
 size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_size_decode(size_t mapbits);
 size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
     size_t pageind);
 size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
+szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
 void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
+size_t arena_mapbits_size_encode(size_t size);
 void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -539,21 +541,23 @@ void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
 void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
     size_t size, size_t flags);
 void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-    index_t binind);
+    szind_t binind);
 void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
-    size_t runind, index_t binind, size_t flags);
+    size_t runind, szind_t binind, size_t flags);
 void arena_metadata_allocated_add(arena_t *arena, size_t size);
 void arena_metadata_allocated_sub(arena_t *arena, size_t size);
 size_t arena_metadata_allocated_get(arena_t *arena);
 bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
-index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
-index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
+szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
+szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
 unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
     const void *ptr);
 prof_tctx_t *arena_prof_tctx_get(const void *ptr);
-void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void arena_prof_tctx_reset(const void *ptr, size_t usize,
+    const void *old_ptr, prof_tctx_t *old_tctx);
 void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     tcache_t *tcache);
 arena_t *arena_aalloc(const void *ptr);
@@ -652,6 +656,22 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
     return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_decode(size_t mapbits)
+{
+    size_t size;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+    size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+    size = mapbits & CHUNK_MAP_SIZE_MASK;
+#else
+    size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+    return (size);
+}
+
 JEMALLOC_ALWAYS_INLINE size_t
 arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
 {
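This new helper is the core of the "don't bitshift by negative amounts" portability fix: on systems whose page size exceeds 8 KiB, CHUNK_MAP_SIZE_SHIFT becomes negative, and shifting by a negative count is undefined behavior in C, so the shift direction must be selected at preprocessing time. Below is a self-contained model of the encode/decode pair; the constant values are invented for illustration and do not come from the real header.

/* Model of the fix; names mirror the header, values are made up. */
#include <assert.h>
#include <stddef.h>

#define CHUNK_MAP_SIZE_SHIFT (-2)           /* e.g. a 64 KiB page system */
#define CHUNK_MAP_SIZE_MASK  ((size_t)~0xfff) /* illustrative mask */

static size_t
size_encode(size_t size)
{
#if CHUNK_MAP_SIZE_SHIFT > 0
    return (size << CHUNK_MAP_SIZE_SHIFT);
#elif CHUNK_MAP_SIZE_SHIFT == 0
    return (size);
#else
    return (size >> -CHUNK_MAP_SIZE_SHIFT);
#endif
}

static size_t
size_decode(size_t mapbits)
{
#if CHUNK_MAP_SIZE_SHIFT > 0
    return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
#elif CHUNK_MAP_SIZE_SHIFT == 0
    return (mapbits & CHUNK_MAP_SIZE_MASK);
#else
    return ((mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT);
#endif
}

int
main(void)
{
    size_t run_size = (size_t)1 << 16; /* one 64 KiB page */

    /* Encode/decode must round-trip page-aligned run sizes. */
    assert(size_decode(size_encode(run_size)) == run_size);
    return (0);
}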
@@ -659,7 +679,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
 
     mapbits = arena_mapbits_get(chunk, pageind);
     assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
-    return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
+    return (arena_mapbits_size_decode(mapbits));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -670,7 +690,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
     mapbits = arena_mapbits_get(chunk, pageind);
     assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
         (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
-    return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
+    return (arena_mapbits_size_decode(mapbits));
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
@@ -684,11 +704,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
     return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
 {
     size_t mapbits;
-    index_t binind;
+    szind_t binind;
 
     mapbits = arena_mapbits_get(chunk, pageind);
     binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -754,6 +774,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
     *mapbitsp = mapbits;
 }
 
+JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbits_size_encode(size_t size)
+{
+    size_t mapbits;
+
+#if CHUNK_MAP_SIZE_SHIFT > 0
+    mapbits = size << CHUNK_MAP_SIZE_SHIFT;
+#elif CHUNK_MAP_SIZE_SHIFT == 0
+    mapbits = size;
+#else
+    mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
+#endif
+
+    assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
+    return (mapbits);
+}
+
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t flags)
@@ -761,11 +798,10 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
     assert((size & PAGE_MASK) == 0);
-    assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
     assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
     assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
         (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-    arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
+    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
         CHUNK_MAP_BININD_INVALID | flags);
 }
 
@@ -777,10 +813,9 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
     size_t mapbits = arena_mapbitsp_read(mapbitsp);
 
     assert((size & PAGE_MASK) == 0);
-    assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
     assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
-    arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | (mapbits
-        & ~CHUNK_MAP_SIZE_MASK));
+    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
+        (mapbits & ~CHUNK_MAP_SIZE_MASK));
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -799,18 +834,17 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
     size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
     assert((size & PAGE_MASK) == 0);
-    assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
     assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
     assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
         (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
-    arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
+    arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
         CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
-    index_t binind)
+    szind_t binind)
 {
     size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
     size_t mapbits = arena_mapbitsp_read(mapbitsp);
@@ -824,7 +858,7 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
 
 JEMALLOC_ALWAYS_INLINE void
 arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
-    index_t binind, size_t flags)
+    szind_t binind, size_t flags)
 {
     size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
 
@@ -901,10 +935,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
     }
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 {
-    index_t binind;
+    szind_t binind;
 
     binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
 
@@ -916,7 +950,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
         size_t rpages_ind;
         arena_run_t *run;
         arena_bin_t *bin;
-        index_t run_binind, actual_binind;
+        szind_t run_binind, actual_binind;
         arena_bin_info_t *bin_info;
         arena_chunk_map_misc_t *miscelm;
         void *rpages;
@@ -950,10 +984,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 #  endif /* JEMALLOC_ARENA_INLINE_A */
 
 #  ifdef JEMALLOC_ARENA_INLINE_B
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
-    index_t binind = bin - arena->bins;
+    szind_t binind = bin - arena->bins;
     assert(binind < NBINS);
     return (binind);
 }
@@ -1060,7 +1094,7 @@ arena_prof_tctx_get(const void *ptr)
 }
 
 JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
 {
     arena_chunk_t *chunk;
 
@@ -1070,17 +1104,59 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (likely(chunk != ptr)) {
         size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 
         assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
 
-        if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
-            arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
-                pageind);
+        if (unlikely(usize > SMALL_MAXCLASS || (uintptr_t)tctx >
+            (uintptr_t)1U)) {
+            arena_chunk_map_misc_t *elm;
+
+            assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+            elm = arena_miscelm_get(chunk, pageind);
             atomic_write_p(&elm->prof_tctx_pun, tctx);
+        } else {
+            /*
+             * tctx must always be initialized for large runs.
+             * Assert that the surrounding conditional logic is
+             * equivalent to checking whether ptr refers to a large
+             * run.
+             */
+            assert(arena_mapbits_large_get(chunk, pageind) == 0);
         }
     } else
         huge_prof_tctx_set(ptr, tctx);
 }
 
+JEMALLOC_INLINE void
+arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *old_tctx)
+{
+
+    cassert(config_prof);
+    assert(ptr != NULL);
+
+    if (unlikely(usize > SMALL_MAXCLASS || (ptr == old_ptr &&
+        (uintptr_t)old_tctx > (uintptr_t)1U))) {
+        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+        if (likely(chunk != ptr)) {
+            size_t pageind;
+            arena_chunk_map_misc_t *elm;
+
+            pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+                LG_PAGE;
+            assert(arena_mapbits_allocated_get(chunk, pageind) !=
+                0);
+            assert(arena_mapbits_large_get(chunk, pageind) != 0);
+
+            elm = arena_miscelm_get(chunk, pageind);
+            atomic_write_p(&elm->prof_tctx_pun,
+                (prof_tctx_t *)(uintptr_t)1U);
+        } else
+            huge_prof_tctx_reset(ptr);
+    }
+}
+
 JEMALLOC_ALWAYS_INLINE void *
 arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     tcache_t *tcache)
@@ -1098,7 +1174,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
                 zero));
         } else
             return (arena_malloc_small(arena, size, zero));
-    } else if (likely(size <= arena_maxclass)) {
+    } else if (likely(size <= large_maxclass)) {
         /*
          * Initialize tcache after checking size in order to avoid
          * infinite recursion during tcache initialization.
@@ -1131,7 +1207,7 @@ arena_salloc(const void *ptr, bool demote)
     size_t ret;
     arena_chunk_t *chunk;
     size_t pageind;
-    index_t binind;
+    szind_t binind;
 
     assert(ptr != NULL);
 
@@ -1190,7 +1266,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
     if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
         /* Small allocation. */
         if (likely(tcache != NULL)) {
-            index_t binind = arena_ptr_small_binind_get(ptr,
+            szind_t binind = arena_ptr_small_binind_get(ptr,
                 mapbits);
             tcache_dalloc_small(tsd, tcache, ptr, binind);
         } else {
@@ -1242,7 +1318,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
     if (likely(size <= SMALL_MAXCLASS)) {
         /* Small allocation. */
         if (likely(tcache != NULL)) {
-            index_t binind = size2index(size);
+            szind_t binind = size2index(size);
             tcache_dalloc_small(tsd, tcache, ptr, binind);
         } else {
            size_t pageind = ((uintptr_t)ptr -
@@ -13,11 +13,10 @@ void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     tcache_t *tcache);
 void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
     bool zero, tcache_t *tcache);
-bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra, bool zero);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero);
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero,
-    tcache_t *tcache);
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
@@ -27,6 +26,7 @@ arena_t *huge_aalloc(const void *ptr);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(const void *ptr);
 void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(const void *ptr);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
@@ -184,7 +184,7 @@ static const bool config_cache_oblivious =
 #include "jemalloc/internal/jemalloc_internal_macros.h"
 
 /* Size class index type. */
-typedef unsigned index_t;
+typedef unsigned szind_t;
 
 /*
  * Flags bits:
@@ -232,7 +232,7 @@ typedef unsigned index_t;
 # ifdef __alpha__
 #  define LG_QUANTUM 4
 # endif
-# ifdef __sparc64__
+# if (defined(__sparc64__) || defined(__sparcv9))
 #  define LG_QUANTUM 4
 # endif
 # if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
@@ -511,12 +511,12 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/huge.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
-index_t size2index_compute(size_t size);
-index_t size2index_lookup(size_t size);
-index_t size2index(size_t size);
-size_t index2size_compute(index_t index);
-size_t index2size_lookup(index_t index);
-size_t index2size(index_t index);
+szind_t size2index_compute(size_t size);
+szind_t size2index_lookup(size_t size);
+szind_t size2index(size_t size);
+size_t index2size_compute(szind_t index);
+size_t index2size_lookup(szind_t index);
+size_t index2size(szind_t index);
 size_t s2u_compute(size_t size);
 size_t s2u_lookup(size_t size);
 size_t s2u(size_t size);
@@ -527,7 +527,7 @@ arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
-JEMALLOC_INLINE index_t
+JEMALLOC_INLINE szind_t
 size2index_compute(size_t size)
 {
 
@@ -558,7 +558,7 @@ size2index_compute(size_t size)
     }
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 size2index_lookup(size_t size)
 {
 
@@ -571,7 +571,7 @@ size2index_lookup(size_t size)
     }
 }
 
-JEMALLOC_ALWAYS_INLINE index_t
+JEMALLOC_ALWAYS_INLINE szind_t
 size2index(size_t size)
 {
 
@@ -582,7 +582,7 @@ size2index(size_t size)
 }
 
 JEMALLOC_INLINE size_t
-index2size_compute(index_t index)
+index2size_compute(szind_t index)
 {
 
 #if (NTBINS > 0)
@@ -609,7 +609,7 @@ index2size_compute(index_t index)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-index2size_lookup(index_t index)
+index2size_lookup(szind_t index)
 {
     size_t ret = (size_t)index2size_tab[index];
     assert(ret == index2size_compute(index));
@@ -617,7 +617,7 @@ index2size_lookup(index_t index)
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-index2size(index_t index)
+index2size(szind_t index)
 {
 
     assert(index < NSIZES);
@@ -705,7 +705,7 @@ sa2u(size_t size, size_t alignment)
     }
 
     /* Try for a large size class. */
-    if (likely(size <= arena_maxclass) && likely(alignment < chunksize)) {
+    if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
         /*
          * We can't achieve subpage alignment, so round up alignment
          * to the minimum that can actually be supported.
@@ -976,7 +976,7 @@ u2rz(size_t usize)
     size_t ret;
 
     if (usize <= SMALL_MAXCLASS) {
-        index_t binind = size2index(usize);
+        szind_t binind = size2index(usize);
         ret = arena_bin_info[binind].redzone_size;
     } else
         ret = 0;
@@ -1096,7 +1096,7 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
             zero, tcache, arena));
     }
 
-    return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
+    return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
         tcache));
 }
 
@@ -50,13 +50,14 @@ arena_mapbits_large_size_get
 arena_mapbitsp_get
 arena_mapbitsp_read
 arena_mapbitsp_write
+arena_mapbits_size_decode
+arena_mapbits_size_encode
 arena_mapbits_small_runind_get
 arena_mapbits_small_set
 arena_mapbits_unallocated_set
 arena_mapbits_unallocated_size_get
 arena_mapbits_unallocated_size_set
 arena_mapbits_unzeroed_get
-arena_maxclass
 arena_maxrun
 arena_maybe_purge
 arena_metadata_allocated_add
@@ -79,6 +80,7 @@ arena_prof_accum_impl
 arena_prof_accum_locked
 arena_prof_promoted
 arena_prof_tctx_get
+arena_prof_tctx_reset
 arena_prof_tctx_set
 arena_ptr_small_binind_get
 arena_purge_all
@@ -249,6 +251,7 @@ huge_dalloc_junk
 huge_malloc
 huge_palloc
 huge_prof_tctx_get
+huge_prof_tctx_reset
 huge_prof_tctx_set
 huge_ralloc
 huge_ralloc_no_move
@@ -283,6 +286,7 @@ ixalloc
 jemalloc_postfork_child
 jemalloc_postfork_parent
 jemalloc_prefork
+large_maxclass
 lg_floor
 malloc_cprintf
 malloc_mutex_init
@@ -377,6 +381,7 @@ prof_reset
 prof_sample_accum_update
 prof_sample_threshold_update
 prof_tctx_get
+prof_tctx_reset
 prof_tctx_set
 prof_tdata_cleanup
 prof_tdata_get
@@ -90,10 +90,11 @@ struct prof_tctx_s {
     prof_tdata_t *tdata;
 
     /*
-     * Copy of tdata->thr_uid, necessary because tdata may be defunct during
-     * teardown.
+     * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be
+     * defunct during teardown.
      */
     uint64_t thr_uid;
+    uint64_t thr_discrim;
 
     /* Profiling counters, protected by tdata->lock. */
     prof_cnt_t cnts;
@@ -330,14 +331,18 @@ bool prof_gdump_get_unlocked(void);
 prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
 bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
     prof_tdata_t **tdata_out);
-prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
+prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
+    bool update);
 prof_tctx_t *prof_tctx_get(const void *ptr);
-void prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
+void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
+void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *tctx);
 void prof_malloc_sample_object(const void *ptr, size_t usize,
     prof_tctx_t *tctx);
 void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
 void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
-    prof_tctx_t *tctx, bool updated, size_t old_usize, prof_tctx_t *old_tctx);
+    prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
+    size_t old_usize, prof_tctx_t *old_tctx);
 void prof_free(tsd_t *tsd, const void *ptr, size_t usize);
 #endif
 
@@ -402,13 +407,24 @@ prof_tctx_get(const void *ptr)
 }
 
 JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
 {
 
     cassert(config_prof);
     assert(ptr != NULL);
 
-    arena_prof_tctx_set(ptr, tctx);
+    arena_prof_tctx_set(ptr, usize, tctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+    prof_tctx_t *old_tctx)
+{
+
+    cassert(config_prof);
+    assert(ptr != NULL);
+
+    arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -442,7 +458,7 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
 }
 
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
+prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
 {
     prof_tctx_t *ret;
     prof_tdata_t *tdata;
@@ -450,8 +466,8 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool update)
 
     assert(usize == s2u(usize));
 
-    if (!prof_active_get_unlocked() || likely(prof_sample_accum_update(tsd,
-        usize, update, &tdata)))
+    if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
+        &tdata)))
         ret = (prof_tctx_t *)(uintptr_t)1U;
     else {
         bt_init(&bt, tdata->vec);
@@ -473,22 +489,24 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
     if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
         prof_malloc_sample_object(ptr, usize, tctx);
     else
-        prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+        prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
 }
 
 JEMALLOC_ALWAYS_INLINE void
 prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
-    bool updated, size_t old_usize, prof_tctx_t *old_tctx)
+    bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
+    prof_tctx_t *old_tctx)
 {
+    bool sampled, old_sampled;
 
     cassert(config_prof);
     assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
 
-    if (!updated && ptr != NULL) {
+    if (prof_active && !updated && ptr != NULL) {
         assert(usize == isalloc(ptr, true));
         if (prof_sample_accum_update(tsd, usize, true, NULL)) {
             /*
-             * Don't sample. The usize passed to PROF_ALLOC_PREP()
+             * Don't sample. The usize passed to prof_alloc_prep()
              * was larger than what actually got allocated, so a
              * backtrace was captured for this allocation, even
             * though its actual usize was insufficient to cross the
@@ -498,12 +516,16 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
         }
     }
 
-    if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
-        prof_free_sampled_object(tsd, old_usize, old_tctx);
-    if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
+    sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+    old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
+
+    if (unlikely(sampled))
         prof_malloc_sample_object(ptr, usize, tctx);
     else
-        prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+        prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+
+    if (unlikely(old_sampled))
+        prof_free_sampled_object(tsd, old_usize, old_tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void
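Taken together, the new prototypes mean a caller reads prof_active once per allocation event and threads the result through every step. The sketch below is illustrative rather than jemalloc's actual irallocx_prof(); reallocate_sampled() is a hypothetical stand-in for the internal reallocation path, while the other calls use the signatures shown above plus prof_active_get_unlocked() and prof_alloc_rollback() named in the ChangeLog entry.

/* Hedged sketch of a reallocation event under the new contract. */
static void *reallocate_sampled(tsd_t *tsd, void *old_ptr, size_t usize,
    prof_tctx_t *tctx);	/* hypothetical internal reallocation path */

static void *
prof_realloc_event_sketch(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize)
{
    /* Read prof_active once; reuse it for both prep and commit. */
    bool prof_active = prof_active_get_unlocked();
    prof_tctx_t *old_tctx = prof_tctx_get(old_ptr);
    prof_tctx_t *tctx = prof_alloc_prep(tsd, usize, prof_active, true);
    void *p = reallocate_sampled(tsd, old_ptr, usize, tctx);

    if (p == NULL) {
        /* Mirrors the irealloc_prof() OOM fix: roll the sampler back. */
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
        old_tctx);
    return (p);
}

Note how prof_realloc() now receives both the old pointer and the old tctx, which lets it reset sample metadata and free the old sampled object in the corrected order shown in the hunk above.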
@@ -167,6 +167,8 @@ size_classes() {
         lg_large_minclass=$((${lg_grp} + 2))
       fi
     fi
+    # Final written value is correct:
+    huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
     index=$((${index} + 1))
     ndelta=$((${ndelta} + 1))
   done
@@ -185,6 +187,7 @@ size_classes() {
   # - lookup_maxclass
   # - small_maxclass
   # - lg_large_minclass
+  # - huge_maxclass
 }
 
 cat <<EOF
@@ -215,6 +218,7 @@ cat <<EOF
  * LOOKUP_MAXCLASS: Maximum size class included in lookup table.
  * SMALL_MAXCLASS: Maximum small size class.
  * LG_LARGE_MINCLASS: Lg of minimum large size class.
+ * HUGE_MAXCLASS: Maximum (huge) size class.
  */
 
 #define LG_SIZE_CLASS_GROUP ${lg_g}
@@ -238,6 +242,7 @@ for lg_z in ${lg_zarr} ; do
   echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
   echo "#define SMALL_MAXCLASS ${small_maxclass}"
   echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+  echo "#define HUGE_MAXCLASS ${huge_maxclass}"
   echo "#endif"
   echo
 done
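HUGE_MAXCLASS is emitted so that callers can bound size + extra before it wraps past the largest representable size class, which is the xallocx() overflow fixed in this release. A hedged sketch of such a guard follows; the function name is illustrative, and HUGE_MAXCLASS is assumed to come from the generated size_classes.h.

/* Illustrative overflow guard: trim "extra" instead of letting size + extra
 * wrap past the largest size class. */
static size_t
clamp_extra(size_t size, size_t extra)
{
    if (size > HUGE_MAXCLASS)
        return (0);		/* the caller will fail the request */
    if (extra > HUGE_MAXCLASS - size)
        extra = HUGE_MAXCLASS - size;
    return (extra);
}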
@@ -77,7 +77,7 @@ struct tcache_s {
 	ql_elm(tcache_t) link;		/* Used for aggregating stats. */
 	uint64_t	prof_accumbytes;/* Cleared after arena_prof_accum(). */
 	unsigned	ev_cnt;		/* Event count since incremental GC. */
-	index_t		next_gc_bin;	/* Next bin to GC. */
+	szind_t		next_gc_bin;	/* Next bin to GC. */
 	tcache_bin_t	tbins[1];	/* Dynamically sized. */
 	/*
 	 * The pointer stacks associated with tbins follow as a contiguous
@@ -126,10 +126,10 @@ extern tcaches_t *tcaches;
 size_t	tcache_salloc(const void *ptr);
 void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
 void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, index_t binind);
+    tcache_bin_t *tbin, szind_t binind);
 void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    index_t binind, unsigned rem);
-void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+    szind_t binind, unsigned rem);
+void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache);
 void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
 void	tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
@@ -161,7 +161,7 @@ void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
 void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     size_t size, bool zero);
 void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
-    index_t binind);
+    szind_t binind);
 void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
     size_t size);
 tcache_t	*tcaches_get(tsd_t *tsd, unsigned ind);
@@ -267,7 +267,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     bool zero)
 {
 	void *ret;
-	index_t binind;
+	szind_t binind;
 	size_t usize;
 	tcache_bin_t *tbin;
 
@@ -312,7 +312,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
     bool zero)
 {
 	void *ret;
-	index_t binind;
+	szind_t binind;
 	size_t usize;
 	tcache_bin_t *tbin;
 
@@ -360,7 +360,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
 }
 
 JEMALLOC_ALWAYS_INLINE void
-tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
+tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
 {
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
@@ -386,7 +386,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
 JEMALLOC_ALWAYS_INLINE void
 tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
 {
-	index_t binind;
+	szind_t binind;
 	tcache_bin_t *tbin;
 	tcache_bin_info_t *tbin_info;
 
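The index_t/szind_t churn here and throughout the arena changes below is a purely mechanical rename, done to avoid colliding with an index_t type that already exists on Solaris. A one-line sketch of the typedef after the rename, assuming the underlying representation is unchanged and only the identifier differs:

	/* Assumed definition; only the name changes relative to the old index_t. */
	typedef unsigned szind_t;	/* index into the size-class tables */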
@@ -56,7 +56,7 @@ JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
 void JEMALLOC_NOTHROW	*@je_@memalign(size_t alignment, size_t size)
-	JEMALLOC_ATTR(malloc);
+	JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
 #endif
 
 #ifdef JEMALLOC_OVERRIDE_VALLOC

261	src/arena.c
@@ -11,7 +11,7 @@ arena_bin_info_t arena_bin_info[NBINS];
 size_t		map_bias;
 size_t		map_misc_offset;
 size_t		arena_maxrun; /* Max run size for arenas. */
-size_t		arena_maxclass; /* Max size class for arenas. */
+size_t		large_maxclass; /* Max large size class. */
 static size_t	small_maxrun; /* Max run size used for small size classes. */
 static bool	*small_run_tab; /* Valid small run page multiples. */
 unsigned	nlclasses; /* Number of large size classes. */
@@ -39,7 +39,7 @@ JEMALLOC_INLINE_C arena_chunk_map_misc_t *
 arena_miscelm_key_create(size_t size)
 {
 
-	return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) |
+	return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
 	    CHUNK_MAP_KEY));
 }
 
@@ -58,8 +58,7 @@ arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
 
 	assert(arena_miscelm_is_key(miscelm));
 
-	return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >>
-	    CHUNK_MAP_SIZE_SHIFT);
+	return (arena_mapbits_size_decode((uintptr_t)miscelm));
 }
 
 JEMALLOC_INLINE_C size_t
@@ -73,7 +72,7 @@ arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
 	pageind = arena_miscelm_to_pageind(miscelm);
 	mapbits = arena_mapbits_get(chunk, pageind);
-	return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
+	return (arena_mapbits_size_decode(mapbits));
 }
 
 JEMALLOC_INLINE_C int
@@ -315,7 +314,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	size_t mapbits = arena_mapbits_get(chunk, pageind);
-	index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
 	unsigned regind = arena_run_regind(run, bin_info, ptr);
 
@@ -426,7 +425,7 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 {
 	arena_chunk_t *chunk;
 	arena_chunk_map_misc_t *miscelm;
-	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
+	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
 	size_t flag_unzeroed_mask;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -460,6 +459,7 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 			 * The run is clean, so some pages may be zeroed (i.e.
 			 * never before touched).
 			 */
+			size_t i;
 			for (i = 0; i < need_pages; i++) {
 				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
 				    != 0)
@@ -508,7 +508,7 @@ arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
 
 static bool
 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
-    index_t binind)
+    szind_t binind)
 {
 	arena_chunk_t *chunk;
 	arena_chunk_map_misc_t *miscelm;
@@ -780,7 +780,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
 static void
 arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -793,7 +793,7 @@ arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
 static void
 arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -806,7 +806,7 @@ arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
 static void
 arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -819,7 +819,7 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
 static void
 arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
 {
-	index_t index = size2index(usize) - nlclasses - NBINS;
+	szind_t index = size2index(usize) - nlclasses - NBINS;
 
 	cassert(config_stats);
 
@@ -1125,7 +1125,7 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
 }
 
 static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
+arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
 {
 	arena_run_t *run = arena_run_first_best_fit(arena, size);
 	if (run != NULL) {
@@ -1136,7 +1136,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
 }
 
 static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
+arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
 {
 	arena_chunk_t *chunk;
 	arena_run_t *run;
@@ -1889,7 +1889,7 @@ static arena_run_t *
 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 {
 	arena_run_t *run;
-	index_t binind;
+	szind_t binind;
 	arena_bin_info_t *bin_info;
 
 	/* Look for a usable run. */
@@ -1939,8 +1939,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 static void *
 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 {
-	void *ret;
-	index_t binind;
+	szind_t binind;
 	arena_bin_info_t *bin_info;
 	arena_run_t *run;
 
@@ -1953,6 +1952,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 		 * Another thread updated runcur while this one ran without the
 		 * bin lock in arena_bin_nonfull_run_get().
 		 */
+		void *ret;
 		assert(bin->runcur->nfree > 0);
 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
 		if (run != NULL) {
@@ -1986,13 +1986,11 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 }
 
 void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
     uint64_t prof_accumbytes)
 {
 	unsigned i, nfill;
 	arena_bin_t *bin;
-	arena_run_t *run;
-	void *ptr;
 
 	assert(tbin->ncached == 0);
 
@@ -2002,6 +2000,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, index_t binind,
 	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
 	    tbin->lg_fill_div); i < nfill; i++) {
+		arena_run_t *run;
+		void *ptr;
 		if ((run = bin->runcur) != NULL && run->nfree > 0)
 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
 		else
@@ -2076,12 +2076,13 @@ arena_redzone_corruption_t *arena_redzone_corruption =
 static void
 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
 {
-	size_t size = bin_info->reg_size;
-	size_t redzone_size = bin_info->redzone_size;
-	size_t i;
 	bool error = false;
 
 	if (opt_junk_alloc) {
+		size_t size = bin_info->reg_size;
+		size_t redzone_size = bin_info->redzone_size;
+		size_t i;
 
 		for (i = 1; i <= redzone_size; i++) {
 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
 			if (*byte != 0xa5) {
@@ -2131,7 +2132,7 @@ arena_dalloc_junk_small_t *arena_dalloc_junk_small =
 void
 arena_quarantine_junk_small(void *ptr, size_t usize)
 {
-	index_t binind;
+	szind_t binind;
 	arena_bin_info_t *bin_info;
 	cassert(config_fill);
 	assert(opt_junk_free);
@@ -2149,7 +2150,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 	void *ret;
 	arena_bin_t *bin;
 	arena_run_t *run;
-	index_t binind;
+	szind_t binind;
 
 	binind = size2index(size);
 	assert(binind < NBINS);
@@ -2233,7 +2234,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
 	    random_offset);
 	if (config_stats) {
-		index_t index = size2index(usize) - NBINS;
+		szind_t index = size2index(usize) - NBINS;
 
 		arena->stats.nmalloc_large++;
 		arena->stats.nrequests_large++;
@@ -2326,7 +2327,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 	ret = arena_miscelm_to_rpages(miscelm);
 
 	if (config_stats) {
-		index_t index = size2index(usize) - NBINS;
+		szind_t index = size2index(usize) - NBINS;
 
 		arena->stats.nmalloc_large++;
 		arena->stats.nrequests_large++;
@@ -2356,7 +2357,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 	    && (usize & PAGE_MASK) == 0))) {
 		/* Small; alignment doesn't require special run placement. */
 		ret = arena_malloc(tsd, arena, usize, zero, tcache);
-	} else if (usize <= arena_maxclass && alignment <= PAGE) {
+	} else if (usize <= large_maxclass && alignment <= PAGE) {
 		/*
 		 * Large; alignment doesn't require special run placement.
 		 * However, the cached pointer may be at a random offset from
@@ -2367,7 +2368,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 		if (config_cache_oblivious)
 			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
 	} else {
-		if (likely(usize <= arena_maxclass)) {
+		if (likely(usize <= large_maxclass)) {
 			ret = arena_palloc_large(tsd, arena, usize, alignment,
 			    zero);
 		} else if (likely(alignment <= chunksize))
@@ -2385,7 +2386,7 @@ arena_prof_promoted(const void *ptr, size_t size)
 {
 	arena_chunk_t *chunk;
 	size_t pageind;
-	index_t binind;
+	szind_t binind;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
@@ -2413,7 +2414,7 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
 	if (run == bin->runcur)
 		bin->runcur = NULL;
 	else {
-		index_t binind = arena_bin_index(extent_node_arena_get(
+		szind_t binind = arena_bin_index(extent_node_arena_get(
 		    &chunk->node), bin);
 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
 
@@ -2477,7 +2478,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	arena_run_t *run;
 	arena_bin_t *bin;
 	arena_bin_info_t *bin_info;
-	index_t binind;
+	szind_t binind;
 
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
@@ -2574,7 +2575,7 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
 	if (!junked)
 		arena_dalloc_junk_large(ptr, usize);
 	if (config_stats) {
-		index_t index = size2index(usize) - NBINS;
+		szind_t index = size2index(usize) - NBINS;
 
 		arena->stats.ndalloc_large++;
 		arena->stats.allocated_large -= usize;
@@ -2621,8 +2622,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
 	    large_pad, true);
 	if (config_stats) {
-		index_t oldindex = size2index(oldsize) - NBINS;
-		index_t index = size2index(size) - NBINS;
+		szind_t oldindex = size2index(oldsize) - NBINS;
+		szind_t index = size2index(size) - NBINS;
 
 		arena->stats.ndalloc_large++;
 		arena->stats.allocated_large -= oldsize;
@@ -2641,42 +2642,42 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t oldsize, size_t size, size_t extra, bool zero)
+    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	size_t npages = (oldsize + large_pad) >> LG_PAGE;
 	size_t followsize;
-	size_t usize_min = s2u(size);
 
 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
 	    large_pad);
 
 	/* Try to extend the run. */
-	assert(usize_min > oldsize);
 	malloc_mutex_lock(&arena->lock);
-	if (pageind+npages < chunk_npages &&
-	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
-	    (followsize = arena_mapbits_unallocated_size_get(chunk,
-	    pageind+npages)) >= usize_min - oldsize) {
+	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+	    pageind+npages) != 0)
+		goto label_fail;
+	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+	if (oldsize + followsize >= usize_min) {
 		/*
 		 * The next run is available and sufficiently large. Split the
 		 * following run, then merge the first part with the existing
 		 * allocation.
 		 */
 		arena_run_t *run;
-		size_t flag_dirty, flag_unzeroed_mask, splitsize, usize;
+		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
 
-		usize = s2u(size + extra);
+		usize = usize_max;
 		while (oldsize + followsize < usize)
 			usize = index2size(size2index(usize)-1);
 		assert(usize >= usize_min);
+		assert(usize >= oldsize);
 		splitsize = usize - oldsize;
+		if (splitsize == 0)
+			goto label_fail;
 
 		run = &arena_miscelm_get(chunk, pageind+npages)->run;
-		if (arena_run_split_large(arena, run, splitsize, zero)) {
-			malloc_mutex_unlock(&arena->lock);
-			return (true);
-		}
+		if (arena_run_split_large(arena, run, splitsize, zero))
+			goto label_fail;
 
 		size = oldsize + splitsize;
 		npages = (size + large_pad) >> LG_PAGE;
@@ -2700,8 +2701,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		    pageind+npages-1)));
 
 		if (config_stats) {
-			index_t oldindex = size2index(oldsize) - NBINS;
-			index_t index = size2index(size) - NBINS;
+			szind_t oldindex = size2index(oldsize) - NBINS;
+			szind_t index = size2index(size) - NBINS;
 
 			arena->stats.ndalloc_large++;
 			arena->stats.allocated_large -= oldsize;
@@ -2718,8 +2719,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		malloc_mutex_unlock(&arena->lock);
 		return (false);
 	}
+label_fail:
 	malloc_mutex_unlock(&arena->lock);
 
 	return (true);
 }
 
@@ -2748,98 +2749,107 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
  * always fail if growing an object, and the following run is already in use.
  */
 static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-	size_t usize;
-
-	/* Make sure extra can't cause size_t overflow. */
-	if (unlikely(extra >= arena_maxclass))
-		return (true);
-
-	usize = s2u(size + extra);
-	if (usize == oldsize) {
-		/* Same size class. */
-		return (false);
-	} else {
 	arena_chunk_t *chunk;
 	arena_t *arena;
 
+	if (oldsize == usize_max) {
+		/* Current size class is compatible and maximal. */
+		return (false);
+	}
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	arena = extent_node_arena_get(&chunk->node);
 
-	if (usize < oldsize) {
-		/* Fill before shrinking in order avoid a race. */
-		arena_ralloc_junk_large(ptr, oldsize, usize);
-		arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
-		    usize);
-		return (false);
-	} else {
-		bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
-		    oldsize, size, extra, zero);
+	if (oldsize < usize_max) {
+		bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+		    usize_min, usize_max, zero);
 		if (config_fill && !ret && !zero) {
 			if (unlikely(opt_junk_alloc)) {
-				memset((void *)((uintptr_t)ptr +
-				    oldsize), 0xa5, isalloc(ptr,
-				    config_prof) - oldsize);
+				memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+				    isalloc(ptr, config_prof) - oldsize);
 			} else if (unlikely(opt_zero)) {
-				memset((void *)((uintptr_t)ptr +
-				    oldsize), 0, isalloc(ptr,
-				    config_prof) - oldsize);
+				memset((void *)((uintptr_t)ptr + oldsize), 0,
+				    isalloc(ptr, config_prof) - oldsize);
 			}
 		}
 		return (ret);
 	}
-	}
+
+	assert(oldsize > usize_max);
+	/* Fill before shrinking in order avoid a race. */
+	arena_ralloc_junk_large(ptr, oldsize, usize_max);
+	arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+	return (false);
 }
 
 bool
 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
     bool zero)
 {
+	size_t usize_min, usize_max;
 
-	if (likely(size <= arena_maxclass)) {
+	usize_min = s2u(size);
+	usize_max = s2u(size + extra);
+	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
 		/*
 		 * Avoid moving the allocation if the size class can be left the
 		 * same.
 		 */
-		if (likely(oldsize <= arena_maxclass)) {
 		if (oldsize <= SMALL_MAXCLASS) {
-			assert(
-			    arena_bin_info[size2index(oldsize)].reg_size
-			    == oldsize);
-			if ((size + extra <= SMALL_MAXCLASS &&
-			    size2index(size + extra) ==
-			    size2index(oldsize)) || (size <= oldsize &&
-			    size + extra >= oldsize))
+			assert(arena_bin_info[size2index(oldsize)].reg_size ==
+			    oldsize);
+			if ((usize_max <= SMALL_MAXCLASS &&
+			    size2index(usize_max) == size2index(oldsize)) ||
+			    (size <= oldsize && usize_max >= oldsize))
 				return (false);
 		} else {
-			assert(size <= arena_maxclass);
-			if (size + extra > SMALL_MAXCLASS) {
-				if (!arena_ralloc_large(ptr, oldsize,
-				    size, extra, zero))
+			if (usize_max > SMALL_MAXCLASS) {
+				if (!arena_ralloc_large(ptr, oldsize, usize_min,
+				    usize_max, zero))
 					return (false);
 			}
 		}
-		}
 
 		/* Reallocation would require a move. */
 		return (true);
-	} else
-		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	} else {
+		return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+		    zero));
+	}
+}
+
+static void *
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
+
+	if (alignment == 0)
+		return (arena_malloc(tsd, arena, usize, zero, tcache));
+	usize = sa2u(usize, alignment);
+	if (usize == 0)
+		return (NULL);
+	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 }
 
 void *
 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+    size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
+	size_t usize;
 
-	if (likely(size <= arena_maxclass)) {
+	usize = s2u(size);
+	if (usize == 0)
+		return (NULL);
+
+	if (likely(usize <= large_maxclass)) {
 		size_t copysize;
 
 		/* Try to avoid moving the allocation. */
-		if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+		if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
 			return (ptr);
 
 		/*
@@ -2847,53 +2857,23 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 		 * the object. In that case, fall back to allocating new space
 		 * and copying.
 		 */
-		if (alignment != 0) {
-			size_t usize = sa2u(size + extra, alignment);
-			if (usize == 0)
-				return (NULL);
-			ret = ipalloct(tsd, usize, alignment, zero, tcache,
-			    arena);
-		} else {
-			ret = arena_malloc(tsd, arena, size + extra, zero,
-			    tcache);
-		}
+		ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+		    zero, tcache);
 
-		if (ret == NULL) {
-			if (extra == 0)
-				return (NULL);
-			/* Try again, this time without extra. */
-			if (alignment != 0) {
-				size_t usize = sa2u(size, alignment);
-				if (usize == 0)
-					return (NULL);
-				ret = ipalloct(tsd, usize, alignment, zero,
-				    tcache, arena);
-			} else {
-				ret = arena_malloc(tsd, arena, size, zero,
-				    tcache);
-			}
-
-			if (ret == NULL)
-				return (NULL);
-		}
+		if (ret == NULL)
+			return (NULL);
 
 		/*
 		 * Junk/zero-filling were already done by
 		 * ipalloc()/arena_malloc().
 		 */
 
-		/*
-		 * Copy at most size bytes (not size+extra), since the caller
-		 * has no expectation that the extra bytes will be reliably
-		 * preserved.
-		 */
-		copysize = (size < oldsize) ? size : oldsize;
+		copysize = (usize < oldsize) ? usize : oldsize;
 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
 		memcpy(ret, ptr, copysize);
 		isqalloc(tsd, ptr, oldsize, tcache);
 	} else {
-		ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
-		    alignment, zero, tcache);
+		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+		    zero, tcache);
 	}
 	return (ret);
 }
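The arena_ralloc_no_move() rewrite above is the arena-level half of the xallocx() fix: the caller's (size, extra) request is translated once into a [usize_min, usize_max] bracket (usize_min = s2u(size), usize_max = s2u(size + extra)), and the in-place paths may now land anywhere inside that bracket instead of failing outright. A minimal caller-side sketch of that contract; the helper name and the false arguments are illustrative, not code from this commit, and it assumes jemalloc's internal headers:

	/*
	 * Sketch: try to resize *in place* to at least s2u(size) and at most
	 * s2u(size + extra) usable bytes, returning the resulting usable size.
	 * If the allocation would have to move, the old usable size is returned.
	 */
	static size_t
	xallocx_no_move_sketch(void *ptr, size_t size, size_t extra)
	{
		size_t old_usize = isalloc(ptr, false);

		if (!arena_ralloc_no_move(ptr, old_usize, size, extra, false))
			return (isalloc(ptr, false));	/* grew or shrank in place */
		return (old_usize);			/* unchanged; caller must realloc and copy */
	}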
@@ -3241,7 +3221,6 @@ small_run_size_init(void)
 bool
 arena_boot(void)
 {
-	size_t header_size;
 	unsigned i;
 
 	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
@@ -3260,7 +3239,7 @@ arena_boot(void)
 	 */
 	map_bias = 0;
 	for (i = 0; i < 3; i++) {
-		header_size = offsetof(arena_chunk_t, map_bits) +
+		size_t header_size = offsetof(arena_chunk_t, map_bits) +
 		    ((sizeof(arena_chunk_map_bits_t) +
 		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
 		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
@@ -3272,17 +3251,17 @@ arena_boot(void)
 
 	arena_maxrun = chunksize - (map_bias << LG_PAGE);
 	assert(arena_maxrun > 0);
-	arena_maxclass = index2size(size2index(chunksize)-1);
-	if (arena_maxclass > arena_maxrun) {
+	large_maxclass = index2size(size2index(chunksize)-1);
+	if (large_maxclass > arena_maxrun) {
 		/*
 		 * For small chunk sizes it's possible for there to be fewer
 		 * non-header pages available than are necessary to serve the
 		 * size classes just below chunksize.
 		 */
-		arena_maxclass = arena_maxrun;
+		large_maxclass = arena_maxrun;
 	}
-	assert(arena_maxclass > 0);
-	nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
+	assert(large_maxclass > 0);
+	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
 	nhclasses = NSIZES - nlclasses - NBINS;
 
 	bin_info_init();
@@ -69,8 +69,6 @@ void *
 chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
     bool *zero, bool *commit)
 {
-	void *ret;
 
 	cassert(have_dss);
 	assert(size > 0 && (size & chunksize_mask) == 0);
 	assert(alignment > 0 && (alignment & chunksize_mask) == 0);
@@ -84,9 +82,6 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 
 	malloc_mutex_lock(&dss_mtx);
 	if (dss_prev != (void *)-1) {
-		size_t gap_size, cpad_size;
-		void *cpad, *dss_next;
-		intptr_t incr;
 
 		/*
 		 * The loop is necessary to recover from races with other
@@ -94,6 +89,9 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
 		 * malloc.
 		 */
 		do {
+			void *ret, *cpad, *dss_next;
+			size_t gap_size, cpad_size;
+			intptr_t incr;
 			/* Avoid an unnecessary system call. */
 			if (new_addr != NULL && dss_max != new_addr)
 				break;
@@ -6,14 +6,16 @@
 static void *
 chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 {
-	void *ret, *pages;
-	size_t alloc_size, leadsize;
+	void *ret;
+	size_t alloc_size;
 
 	alloc_size = size + alignment - PAGE;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
 	do {
+		void *pages;
+		size_t leadsize;
 		pages = pages_map(NULL, alloc_size);
 		if (pages == NULL)
 			return (NULL);

126	src/huge.c
@@ -126,18 +126,19 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
 static void
-huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
-    size_t size, size_t extra, bool zero)
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-	size_t usize_next;
+	size_t usize, usize_next;
 	extent_node_t *node;
 	arena_t *arena;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	bool zeroed;
 
 	/* Increase usize to incorporate extra. */
-	while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
-		usize = usize_next;
+	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+	    <= oldsize; usize = usize_next)
+		; /* Do nothing. */
 
 	if (oldsize == usize)
 		return;
@@ -148,11 +149,12 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
 	/* Fill if necessary (shrinking). */
 	if (oldsize > usize) {
 		size_t sdiff = oldsize - usize;
-		zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
-		    CHUNK_CEILING(usize), usize, sdiff);
 		if (config_fill && unlikely(opt_junk_free)) {
 			memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
 			zeroed = false;
+		} else {
+			zeroed = !chunk_purge_wrapper(arena, &chunk_hooks, ptr,
+			    CHUNK_CEILING(oldsize), usize, sdiff);
 		}
 	} else
 		zeroed = true;
@@ -194,6 +196,8 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 	arena = extent_node_arena_get(node);
 	chunk_hooks = chunk_hooks_get(arena);
 
+	assert(oldsize > usize);
+
 	/* Split excess chunks. */
 	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
 	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
@@ -202,14 +206,15 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 
 	if (oldsize > usize) {
 		size_t sdiff = oldsize - usize;
-		zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
-		    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
-		    CHUNK_CEILING(usize), CHUNK_ADDR2OFFSET((uintptr_t)ptr +
-		    usize), sdiff);
 		if (config_fill && unlikely(opt_junk_free)) {
 			huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
 			    sdiff);
 			zeroed = false;
+		} else {
+			zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
+			    CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
+			    CHUNK_CEILING(oldsize),
+			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
 		}
 	} else
 		zeroed = true;
@@ -228,18 +233,11 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 }
 
 static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
-	size_t usize;
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
 	extent_node_t *node;
 	arena_t *arena;
 	bool is_zeroed_subchunk, is_zeroed_chunk;
 
-	usize = s2u(size);
-	if (usize == 0) {
-		/* size_t overflow. */
-		return (true);
-	}
-
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
@@ -280,89 +278,76 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
 }
 
 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-	size_t usize;
-
-	/* Both allocations must be huge to avoid a move. */
-	if (oldsize < chunksize)
-		return (true);
 
 	assert(s2u(oldsize) == oldsize);
-	usize = s2u(size);
-	if (usize == 0) {
-		/* size_t overflow. */
+
+	/* Both allocations must be huge to avoid a move. */
+	if (oldsize < chunksize || usize_max < chunksize)
 		return (true);
+
+	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+		/* Attempt to expand the allocation in-place. */
+		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+			return (false);
+		/* Try again, this time with usize_min. */
+		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
+		    oldsize, usize_min, zero))
+			return (false);
 	}
 
 	/*
 	 * Avoid moving the allocation if the existing chunk size accommodates
 	 * the new size.
 	 */
-	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
-	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(s2u(size+extra))) {
-		huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
+	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
 		    zero);
 		return (false);
 	}
 
 	/* Attempt to shrink the allocation in-place. */
-	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
-		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));
-
-	/* Attempt to expand the allocation in-place. */
-	if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
-		if (extra == 0)
-			return (true);
-
-		/* Try again, this time without extra. */
-		return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
-	}
-	return (false);
+	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+	return (true);
+}
+
+static void *
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
+
+	if (alignment <= chunksize)
+		return (huge_malloc(tsd, arena, usize, zero, tcache));
+	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
 }
 
 void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
 	size_t copysize;
 
 	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
+	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
 		return (ptr);
 
 	/*
-	 * size and oldsize are different enough that we need to use a
+	 * usize and oldsize are different enough that we need to use a
 	 * different size class. In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	if (alignment > chunksize) {
-		ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
-		    tcache);
-	} else
-		ret = huge_malloc(tsd, arena, size + extra, zero, tcache);
-
-	if (ret == NULL) {
-		if (extra == 0)
-			return (NULL);
-		/* Try again, this time without extra. */
-		if (alignment > chunksize) {
-			ret = huge_palloc(tsd, arena, size, alignment, zero,
-			    tcache);
-		} else
-			ret = huge_malloc(tsd, arena, size, zero, tcache);
-
-		if (ret == NULL)
-			return (NULL);
-	}
+	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+	    tcache);
+	if (ret == NULL)
+		return (NULL);
 
-	/*
-	 * Copy at most size bytes (not size+extra), since the caller has no
-	 * expectation that the extra bytes will be reliably preserved.
-	 */
-	copysize = (size < oldsize) ? size : oldsize;
+	copysize = (usize < oldsize) ? usize : oldsize;
 	memcpy(ret, ptr, copysize);
 	isqalloc(tsd, ptr, oldsize, tcache);
 	return (ret);
@@ -439,3 +424,10 @@ huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 	extent_node_prof_tctx_set(node, tctx);
 	malloc_mutex_unlock(&arena->huge_mtx);
 }
+
+void
+huge_prof_tctx_reset(const void *ptr)
+{
+
+	huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+}

113	src/jemalloc.c
@@ -179,13 +179,24 @@ static bool malloc_initializer = NO_INITIALIZER;
 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
 #else
 static malloc_mutex_t	init_lock;
+static bool init_lock_initialized = false;
 
 JEMALLOC_ATTR(constructor)
 static void WINAPI
 _init_init_lock(void)
 {
 
+	/* If another constructor in the same binary is using mallctl to
+	 * e.g. setup chunk hooks, it may end up running before this one,
+	 * and malloc_init_hard will crash trying to lock the uninitialized
+	 * lock. So we force an initialization of the lock in
+	 * malloc_init_hard as well. We don't try to care about atomicity
+	 * of the accessed to the init_lock_initialized boolean, since it
+	 * really only matters early in the process creation, before any
+	 * separate thread normally starts doing anything. */
+	if (!init_lock_initialized)
 		malloc_mutex_init(&init_lock);
+	init_lock_initialized = true;
 }
 
 #ifdef _MSC_VER
@@ -510,17 +521,17 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 	assert(ind < narenas_actual || !init_if_missing);
 	narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
 
-	if (!*arenas_cache_bypassp) {
+	if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
 		*arenas_cache_bypassp = true;
 		arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
 		    narenas_cache);
 		*arenas_cache_bypassp = false;
-	} else
-		arenas_cache = NULL;
+	}
 	if (arenas_cache == NULL) {
 		/*
 		 * This function must always tell the truth, even if
-		 * it's slow, so don't let OOM or recursive allocation
+		 * it's slow, so don't let OOM, thread cleanup (note
+		 * tsd_nominal check), nor recursive allocation
 		 * avoidance (note arenas_cache_bypass check) get in the
 		 * way.
 		 */
@@ -531,6 +542,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 		malloc_mutex_unlock(&arenas_lock);
 		return (arena);
 	}
+	assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
 	tsd_arenas_cache_set(tsd, arenas_cache);
 	tsd_narenas_cache_set(tsd, narenas_cache);
 }
@@ -649,9 +661,11 @@ arenas_cache_cleanup(tsd_t *tsd)
 	arena_t **arenas_cache;
 
 	arenas_cache = tsd_arenas_cache_get(tsd);
-	if (arenas_cache != NULL)
+	if (arenas_cache != NULL) {
+		tsd_arenas_cache_set(tsd, NULL);
 		a0dalloc(arenas_cache);
+	}
 }
 
 void
 narenas_cache_cleanup(tsd_t *tsd)
@@ -1297,6 +1311,9 @@ static bool
 malloc_init_hard(void)
 {
 
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+	_init_init_lock();
+#endif
 	malloc_mutex_lock(&init_lock);
 	if (!malloc_init_hard_needed()) {
 		malloc_mutex_unlock(&init_lock);
@@ -1361,7 +1378,7 @@ imalloc_prof(tsd_t *tsd, size_t usize)
 	void *p;
 	prof_tctx_t *tctx;
 
-	tctx = prof_alloc_prep(tsd, usize, true);
+	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = imalloc_prof_sample(tsd, usize, tctx);
 	else
@@ -1451,7 +1468,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
 	void *p;
 	prof_tctx_t *tctx;
 
-	tctx = prof_alloc_prep(tsd, usize, true);
+	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
 	else
@@ -1582,7 +1599,7 @@ icalloc_prof(tsd_t *tsd, size_t usize)
 	void *p;
 	prof_tctx_t *tctx;
 
-	tctx = prof_alloc_prep(tsd, usize, true);
+	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = icalloc_prof_sample(tsd, usize, tctx);
 	else
@@ -1665,7 +1682,7 @@ label_return:
 }
 
 static void *
-irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
     prof_tctx_t *tctx)
 {
 	void *p;
@@ -1673,31 +1690,36 @@ irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
 	if (tctx == NULL)
 		return (NULL);
 	if (usize <= SMALL_MAXCLASS) {
-		p = iralloc(tsd, oldptr, old_usize, LARGE_MINCLASS, 0, false);
+		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
 		if (p == NULL)
 			return (NULL);
 		arena_prof_promoted(p, usize);
 	} else
-		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
+		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
 
 	return (p);
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
 {
 	void *p;
+	bool prof_active;
 	prof_tctx_t *old_tctx, *tctx;
 
-	old_tctx = prof_tctx_get(oldptr);
-	tctx = prof_alloc_prep(tsd, usize, true);
+	prof_active = prof_active_get_unlocked();
+	old_tctx = prof_tctx_get(old_ptr);
+	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
-		p = irealloc_prof_sample(tsd, oldptr, old_usize, usize, tctx);
+		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
 	else
-		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
-	if (p == NULL)
+		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+	if (unlikely(p == NULL)) {
+		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
-	prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);
+	}
+	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+	    old_tctx);
 
 	return (p);
 }
@ -1995,7 +2017,7 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
|
|||||||
if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
|
if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
|
||||||
&zero, &tcache, &arena)))
|
&zero, &tcache, &arena)))
|
||||||
return (NULL);
|
return (NULL);
|
||||||
tctx = prof_alloc_prep(tsd, *usize, true);
|
tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
|
||||||
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
|
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
|
||||||
p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
|
p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
|
||||||
zero, tcache, arena);
|
zero, tcache, arena);
|
||||||
@ -2076,7 +2098,7 @@ label_oom:
|
|||||||
}
|
}
|
||||||
|
|
||||||
static void *
|
static void *
|
||||||
irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
|
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
|
||||||
size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
|
size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
|
||||||
prof_tctx_t *tctx)
|
prof_tctx_t *tctx)
|
||||||
{
|
{
|
||||||
@ -2085,13 +2107,13 @@ irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
|
|||||||
if (tctx == NULL)
|
if (tctx == NULL)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
if (usize <= SMALL_MAXCLASS) {
|
if (usize <= SMALL_MAXCLASS) {
|
||||||
p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
|
p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
|
||||||
zero, tcache, arena);
|
zero, tcache, arena);
|
||||||
if (p == NULL)
|
if (p == NULL)
|
||||||
return (NULL);
|
return (NULL);
|
||||||
arena_prof_promoted(p, usize);
|
arena_prof_promoted(p, usize);
|
||||||
} else {
|
} else {
|
||||||
p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
|
p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
|
||||||
tcache, arena);
|
tcache, arena);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2099,28 +2121,30 @@ irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
|
|||||||
}
|
}
|
||||||
|
|
||||||
JEMALLOC_ALWAYS_INLINE_C void *
|
JEMALLOC_ALWAYS_INLINE_C void *
|
||||||
irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
|
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
|
||||||
size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
|
size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
|
||||||
arena_t *arena)
|
arena_t *arena)
|
||||||
{
|
{
|
||||||
void *p;
|
void *p;
|
||||||
|
bool prof_active;
|
||||||
prof_tctx_t *old_tctx, *tctx;
|
prof_tctx_t *old_tctx, *tctx;
|
||||||
|
|
||||||
old_tctx = prof_tctx_get(oldptr);
|
prof_active = prof_active_get_unlocked();
|
||||||
tctx = prof_alloc_prep(tsd, *usize, false);
|
old_tctx = prof_tctx_get(old_ptr);
|
||||||
|
tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
|
||||||
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
|
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
|
||||||
p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
|
p = irallocx_prof_sample(tsd, old_ptr, old_usize, size,
|
||||||
alignment, *usize, zero, tcache, arena, tctx);
|
alignment, *usize, zero, tcache, arena, tctx);
|
||||||
} else {
|
} else {
|
||||||
p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
|
p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
|
||||||
tcache, arena);
|
tcache, arena);
|
||||||
}
|
}
|
||||||
if (unlikely(p == NULL)) {
|
if (unlikely(p == NULL)) {
|
||||||
prof_alloc_rollback(tsd, tctx, false);
|
prof_alloc_rollback(tsd, tctx, true);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (p == oldptr && alignment != 0) {
|
if (p == old_ptr && alignment != 0) {
|
||||||
/*
|
/*
|
||||||
* The allocation did not move, so it is possible that the size
|
* The allocation did not move, so it is possible that the size
|
||||||
* class is smaller than would guarantee the requested
|
* class is smaller than would guarantee the requested
|
||||||
@ -2131,7 +2155,8 @@ irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
|
|||||||
*/
|
*/
|
||||||
*usize = isalloc(p, config_prof);
|
*usize = isalloc(p, config_prof);
|
||||||
}
|
}
|
||||||
prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);
|
prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
|
||||||
|
old_usize, old_tctx);
|
||||||
|
|
||||||
return (p);
|
return (p);
|
||||||
}
|
}
|
||||||
@ -2226,7 +2251,7 @@ ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
|
|||||||
|
|
||||||
static size_t
|
static size_t
|
||||||
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
|
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
|
||||||
size_t alignment, size_t max_usize, bool zero, prof_tctx_t *tctx)
|
size_t alignment, size_t usize_max, bool zero, prof_tctx_t *tctx)
|
||||||
{
|
{
|
||||||
size_t usize;
|
size_t usize;
|
||||||
|
|
||||||
@ -2240,7 +2265,7 @@ ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
|
|||||||
(SMALL_MAXCLASS+1), alignment, zero))
|
(SMALL_MAXCLASS+1), alignment, zero))
|
||||||
return (old_usize);
|
return (old_usize);
|
||||||
usize = isalloc(ptr, config_prof);
|
usize = isalloc(ptr, config_prof);
|
||||||
if (max_usize < LARGE_MINCLASS)
|
if (usize_max < LARGE_MINCLASS)
|
||||||
arena_prof_promoted(ptr, usize);
|
arena_prof_promoted(ptr, usize);
|
||||||
} else {
|
} else {
|
||||||
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
|
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
|
||||||
@ -2254,9 +2279,11 @@ JEMALLOC_ALWAYS_INLINE_C size_t
|
|||||||
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
|
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
|
||||||
size_t extra, size_t alignment, bool zero)
|
size_t extra, size_t alignment, bool zero)
|
||||||
{
|
{
|
||||||
size_t max_usize, usize;
|
size_t usize_max, usize;
|
||||||
|
bool prof_active;
|
||||||
prof_tctx_t *old_tctx, *tctx;
|
prof_tctx_t *old_tctx, *tctx;
|
||||||
|
|
||||||
|
prof_active = prof_active_get_unlocked();
|
||||||
old_tctx = prof_tctx_get(ptr);
|
old_tctx = prof_tctx_get(ptr);
|
||||||
/*
|
/*
|
||||||
* usize isn't knowable before ixalloc() returns when extra is non-zero.
|
* usize isn't knowable before ixalloc() returns when extra is non-zero.
|
||||||
@ -2264,12 +2291,12 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
|
|||||||
* prof_alloc_prep() to decide whether to capture a backtrace.
|
* prof_alloc_prep() to decide whether to capture a backtrace.
|
||||||
* prof_realloc() will use the actual usize to decide whether to sample.
|
* prof_realloc() will use the actual usize to decide whether to sample.
|
||||||
*/
|
*/
|
||||||
max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
|
usize_max = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
|
||||||
alignment);
|
alignment);
|
||||||
tctx = prof_alloc_prep(tsd, max_usize, false);
|
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
|
||||||
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
|
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
|
||||||
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
|
usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
|
||||||
alignment, zero, max_usize, tctx);
|
alignment, usize_max, zero, tctx);
|
||||||
} else {
|
} else {
|
||||||
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
|
usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
|
||||||
zero);
|
zero);
|
||||||
@ -2278,7 +2305,8 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
|
|||||||
prof_alloc_rollback(tsd, tctx, false);
|
prof_alloc_rollback(tsd, tctx, false);
|
||||||
return (usize);
|
return (usize);
|
||||||
}
|
}
|
||||||
prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);
|
prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
|
||||||
|
old_tctx);
|
||||||
|
|
||||||
return (usize);
|
return (usize);
|
||||||
}
|
}
|
||||||
@ -2300,6 +2328,17 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
|
|||||||
tsd = tsd_fetch();
|
tsd = tsd_fetch();
|
||||||
|
|
||||||
old_usize = isalloc(ptr, config_prof);
|
old_usize = isalloc(ptr, config_prof);
|
||||||
|
|
||||||
|
/* Clamp extra if necessary to avoid (size + extra) overflow. */
|
||||||
|
if (unlikely(size + extra > HUGE_MAXCLASS)) {
|
||||||
|
/* Check for size overflow. */
|
||||||
|
if (unlikely(size > HUGE_MAXCLASS)) {
|
||||||
|
usize = old_usize;
|
||||||
|
goto label_not_resized;
|
||||||
|
}
|
||||||
|
extra = HUGE_MAXCLASS - size;
|
||||||
|
}
|
||||||
|
|
||||||
if (config_valgrind && unlikely(in_valgrind))
|
if (config_valgrind && unlikely(in_valgrind))
|
||||||
old_rzsize = u2rz(old_usize);
|
old_rzsize = u2rz(old_usize);
|
||||||
|
|
||||||
src/prof.c (16 changed lines)

@@ -138,10 +138,17 @@ prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
     uint64_t a_thr_uid = a->thr_uid;
     uint64_t b_thr_uid = b->thr_uid;
     int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+    if (ret == 0) {
+        uint64_t a_thr_discrim = a->thr_discrim;
+        uint64_t b_thr_discrim = b->thr_discrim;
+        ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+            b_thr_discrim);
     if (ret == 0) {
         uint64_t a_tctx_uid = a->tctx_uid;
         uint64_t b_tctx_uid = b->tctx_uid;
-        ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid);
+        ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+            b_tctx_uid);
+        }
     }
     return (ret);
 }

@@ -219,7 +226,7 @@ void
 prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
 {

-    prof_tctx_set(ptr, tctx);
+    prof_tctx_set(ptr, usize, tctx);

     malloc_mutex_lock(tctx->tdata->lock);
     tctx->cnts.curobjs++;

@@ -791,6 +798,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
     }
     ret.p->tdata = tdata;
     ret.p->thr_uid = tdata->thr_uid;
+    ret.p->thr_discrim = tdata->thr_discrim;
     memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
     ret.p->gctx = gctx;
     ret.p->tctx_uid = tdata->tctx_uid_next++;

@@ -1569,7 +1577,6 @@ prof_idump(void)
 {
     tsd_t *tsd;
     prof_tdata_t *tdata;
-    char filename[PATH_MAX + 1];

     cassert(config_prof);

@@ -1585,6 +1592,7 @@ prof_idump(void)
     }

     if (opt_prof_prefix[0] != '\0') {
+        char filename[PATH_MAX + 1];
         malloc_mutex_lock(&prof_dump_seq_mtx);
         prof_dump_filename(filename, 'i', prof_dump_iseq);
         prof_dump_iseq++;

@@ -1623,7 +1631,6 @@ prof_gdump(void)
 {
     tsd_t *tsd;
     prof_tdata_t *tdata;
-    char filename[DUMP_FILENAME_BUFSIZE];

     cassert(config_prof);

@@ -1639,6 +1646,7 @@ prof_gdump(void)
     }

     if (opt_prof_prefix[0] != '\0') {
+        char filename[DUMP_FILENAME_BUFSIZE];
         malloc_mutex_lock(&prof_dump_seq_mtx);
         prof_dump_filename(filename, 'u', prof_dump_useq);
         prof_dump_useq++;
src/tcache.c (14 changed lines)

@@ -32,7 +32,7 @@ size_t tcache_salloc(const void *ptr)
 void
 tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 {
-    index_t binind = tcache->next_gc_bin;
+    szind_t binind = tcache->next_gc_bin;
     tcache_bin_t *tbin = &tcache->tbins[binind];
     tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

@@ -72,7 +72,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)

 void *
 tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, index_t binind)
+    tcache_bin_t *tbin, szind_t binind)
 {
     void *ret;

@@ -87,7 +87,7 @@ tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,

 void
 tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    index_t binind, unsigned rem)
+    szind_t binind, unsigned rem)
 {
     arena_t *arena;
     void *ptr;

@@ -166,7 +166,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 }

 void
-tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache)
 {
     arena_t *arena;

@@ -496,13 +496,13 @@ tcache_boot(void)
     unsigned i;

     /*
-     * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+     * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
      * known.
      */
     if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
         tcache_maxclass = SMALL_MAXCLASS;
-    else if ((1U << opt_lg_tcache_max) > arena_maxclass)
-        tcache_maxclass = arena_maxclass;
+    else if ((1U << opt_lg_tcache_max) > large_maxclass)
+        tcache_maxclass = large_maxclass;
     else
         tcache_maxclass = (1U << opt_lg_tcache_max);
@@ -1,5 +1,9 @@
 #include "test/jemalloc_test.h"

+#ifdef JEMALLOC_FILL
+const char *malloc_conf = "junk:false";
+#endif
+
 static chunk_hooks_t orig_hooks;
 static chunk_hooks_t old_hooks;

@@ -22,7 +22,7 @@ TEST_BEGIN(test_grow_and_shrink)
         szs[j-1], szs[j-1]+1);
     szs[j] = sallocx(q, 0);
     assert_zu_ne(szs[j], szs[j-1]+1,
-        "Expected size to at least: %zu", szs[j-1]+1);
+        "Expected size to be at least: %zu", szs[j-1]+1);
     p = q;
 }

@@ -48,6 +48,305 @@ TEST_BEGIN(test_no_move_fail)
 }
 TEST_END

+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+    unsigned ret;
+    size_t z;
+
+    z = sizeof(unsigned);
+    assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+        "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+    return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+    return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+    return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+    return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+    size_t ret;
+    size_t z;
+    size_t mib[4];
+    size_t miblen = 4;
+
+    z = sizeof(size_t);
+    assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+        0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+    mib[2] = ind;
+    z = sizeof(size_t);
+    assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+        0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
+
+    return (ret);
+}
+
+static size_t
+get_small_size(size_t ind)
+{
+
+    return (get_size_impl("arenas.bin.0.size", ind));
+}
+
+static size_t
+get_large_size(size_t ind)
+{
+
+    return (get_size_impl("arenas.lrun.0.size", ind));
+}
+
+static size_t
+get_huge_size(size_t ind)
+{
+
+    return (get_size_impl("arenas.hchunk.0.size", ind));
+}
+
+TEST_BEGIN(test_size)
+{
+    size_t small0, hugemax;
+    void *p;
+
+    /* Get size classes. */
+    small0 = get_small_size(0);
+    hugemax = get_huge_size(get_nhuge()-1);
+
+    p = mallocx(small0, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+    /* Test smallest supported size. */
+    assert_zu_eq(xallocx(p, 1, 0, 0), small0,
+        "Unexpected xallocx() behavior");
+
+    /* Test largest supported size. */
+    assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax,
+        "Unexpected xallocx() behavior");
+
+    /* Test size overflow. */
+    assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax,
+        "Unexpected xallocx() behavior");
+    assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax,
+        "Unexpected xallocx() behavior");
+
+    dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_size_extra_overflow)
+{
+    size_t small0, hugemax;
+    void *p;
+
+    /* Get size classes. */
+    small0 = get_small_size(0);
+    hugemax = get_huge_size(get_nhuge()-1);
+
+    p = mallocx(small0, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+    /* Test overflows that can be resolved by clamping extra. */
+    assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax,
+        "Unexpected xallocx() behavior");
+    assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax,
+        "Unexpected xallocx() behavior");
+
+    /* Test overflow such that hugemax-size underflows. */
+    assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax,
+        "Unexpected xallocx() behavior");
+    assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax,
+        "Unexpected xallocx() behavior");
+    assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax,
+        "Unexpected xallocx() behavior");
+    assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax,
+        "Unexpected xallocx() behavior");
+
+    dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_small)
+{
+    size_t small0, small1, hugemax;
+    void *p;
+
+    /* Get size classes. */
+    small0 = get_small_size(0);
+    small1 = get_small_size(1);
+    hugemax = get_huge_size(get_nhuge()-1);
+
+    p = mallocx(small0, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+    assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, small1, 0, 0), small0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0,
+        "Unexpected xallocx() behavior");
+
+    /* Test size+extra overflow. */
+    assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0,
+        "Unexpected xallocx() behavior");
+
+    dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_large)
+{
+    size_t smallmax, large0, large1, large2, huge0, hugemax;
+    void *p;
+
+    /* Get size classes. */
+    smallmax = get_small_size(get_nsmall()-1);
+    large0 = get_large_size(0);
+    large1 = get_large_size(1);
+    large2 = get_large_size(2);
+    huge0 = get_huge_size(0);
+    hugemax = get_huge_size(get_nhuge()-1);
+
+    p = mallocx(large2, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+    assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+        "Unexpected xallocx() behavior");
+    /* Test size decrease with zero extra. */
+    assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, smallmax, 0, 0), large0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+        "Unexpected xallocx() behavior");
+    /* Test size decrease with non-zero extra. */
+    assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+        "Unexpected xallocx() behavior");
+    /* Test size increase with zero extra. */
+    assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, huge0, 0, 0), large2,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+        "Unexpected xallocx() behavior");
+    /* Test size increase with non-zero extra. */
+    assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, large0, 0, 0), large0,
+        "Unexpected xallocx() behavior");
+    /* Test size increase with non-zero extra. */
+    assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, large2, 0, 0), large2,
+        "Unexpected xallocx() behavior");
+    /* Test size+extra overflow. */
+    assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0,
+        "Unexpected xallocx() behavior");
+
+    dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_huge)
+{
+    size_t largemax, huge0, huge1, huge2, hugemax;
+    void *p;
+
+    /* Get size classes. */
+    largemax = get_large_size(get_nlarge()-1);
+    huge0 = get_huge_size(0);
+    huge1 = get_huge_size(1);
+    huge2 = get_huge_size(2);
+    hugemax = get_huge_size(get_nhuge()-1);
+
+    p = mallocx(huge2, 0);
+    assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+    assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+        "Unexpected xallocx() behavior");
+    /* Test size decrease with zero extra. */
+    assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+        "Unexpected xallocx() behavior");
+    assert_zu_ge(xallocx(p, largemax, 0, 0), huge0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+        "Unexpected xallocx() behavior");
+    /* Test size decrease with non-zero extra. */
+    assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2,
+        "Unexpected xallocx() behavior");
+    assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1,
+        "Unexpected xallocx() behavior");
+    assert_zu_ge(xallocx(p, largemax, huge0 - largemax, 0), huge0,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+        "Unexpected xallocx() behavior");
+    /* Test size increase with zero extra. */
+    assert_zu_le(xallocx(p, huge2, 0, 0), huge2,
+        "Unexpected xallocx() behavior");
+    assert_zu_le(xallocx(p, hugemax+1, 0, 0), huge2,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+        "Unexpected xallocx() behavior");
+    /* Test size increase with non-zero extra. */
+    assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_ge(xallocx(p, huge0, 0, 0), huge0,
+        "Unexpected xallocx() behavior");
+    /* Test size increase with non-zero extra. */
+    assert_zu_le(xallocx(p, huge0, huge2 - huge0, 0), huge2,
+        "Unexpected xallocx() behavior");
+
+    assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+        "Unexpected xallocx() behavior");
+    /* Test size+extra overflow. */
+    assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax,
+        "Unexpected xallocx() behavior");
+
+    dallocx(p, 0);
+}
+TEST_END
+
 int
 main(void)
 {

@@ -55,5 +354,10 @@ main(void)
     return (test(
         test_same_size,
         test_extra_no_move,
-        test_no_move_fail));
+        test_no_move_fail,
+        test_size,
+        test_size_extra_overflow,
+        test_extra_small,
+        test_extra_large,
+        test_extra_huge));
 }
@@ -140,7 +140,7 @@ TEST_BEGIN(test_junk_large)
 {

     test_skip_if(!config_fill);
-    test_junk(SMALL_MAXCLASS+1, arena_maxclass);
+    test_junk(SMALL_MAXCLASS+1, large_maxclass);
 }
 TEST_END

@@ -148,7 +148,7 @@ TEST_BEGIN(test_junk_huge)
 {

     test_skip_if(!config_fill);
-    test_junk(arena_maxclass+1, chunksize*2);
+    test_junk(large_maxclass+1, chunksize*2);
 }
 TEST_END

@@ -172,8 +172,8 @@ arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize)
 {

     arena_ralloc_junk_large_orig(ptr, old_usize, usize);
-    assert_zu_eq(old_usize, arena_maxclass, "Unexpected old_usize");
-    assert_zu_eq(usize, shrink_size(arena_maxclass), "Unexpected usize");
+    assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize");
+    assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize");
     most_recently_trimmed = ptr;
 }

@@ -181,13 +181,13 @@ TEST_BEGIN(test_junk_large_ralloc_shrink)
 {
     void *p1, *p2;

-    p1 = mallocx(arena_maxclass, 0);
+    p1 = mallocx(large_maxclass, 0);
     assert_ptr_not_null(p1, "Unexpected mallocx() failure");

     arena_ralloc_junk_large_orig = arena_ralloc_junk_large;
     arena_ralloc_junk_large = arena_ralloc_junk_large_intercept;

-    p2 = rallocx(p1, shrink_size(arena_maxclass), 0);
+    p2 = rallocx(p1, shrink_size(large_maxclass), 0);
     assert_ptr_eq(p1, p2, "Unexpected move during shrink");

     arena_ralloc_junk_large = arena_ralloc_junk_large_orig;
@@ -16,6 +16,35 @@ prof_dump_open_intercept(bool propagate_err, const char *filename)
     return (fd);
 }

+static void
+set_prof_active(bool active)
+{
+
+    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
+        0, "Unexpected mallctl failure");
+}
+
+static size_t
+get_lg_prof_sample(void)
+{
+    size_t lg_prof_sample;
+    size_t sz = sizeof(size_t);
+
+    assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
+        "Unexpected mallctl failure while reading profiling sample rate");
+    return (lg_prof_sample);
+}
+
+static void
+do_prof_reset(size_t lg_prof_sample)
+{
+    assert_d_eq(mallctl("prof.reset", NULL, NULL,
+        &lg_prof_sample, sizeof(size_t)), 0,
+        "Unexpected mallctl failure while resetting profile data");
+    assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
+        "Expected profile sample rate change");
+}
+
 TEST_BEGIN(test_prof_reset_basic)
 {
     size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;

@@ -30,9 +59,7 @@ TEST_BEGIN(test_prof_reset_basic)
         "Unexpected mallctl failure while reading profiling sample rate");
     assert_zu_eq(lg_prof_sample_orig, 0,
         "Unexpected profiling sample rate");
-    sz = sizeof(size_t);
-    assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
-        "Unexpected mallctl failure while reading profiling sample rate");
+    lg_prof_sample = get_lg_prof_sample();
     assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
         "Unexpected disagreement between \"opt.lg_prof_sample\" and "
         "\"prof.lg_sample\"");

@@ -41,10 +68,7 @@ TEST_BEGIN(test_prof_reset_basic)
     for (i = 0; i < 2; i++) {
         assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
             "Unexpected mallctl failure while resetting profile data");
-        sz = sizeof(size_t);
-        assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
-            NULL, 0), 0, "Unexpected mallctl failure while reading "
-            "profiling sample rate");
+        lg_prof_sample = get_lg_prof_sample();
         assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
             "Unexpected profile sample rate change");
     }

@@ -52,22 +76,15 @@ TEST_BEGIN(test_prof_reset_basic)
     /* Test resets with prof.lg_sample changes. */
     lg_prof_sample_next = 1;
     for (i = 0; i < 2; i++) {
-        assert_d_eq(mallctl("prof.reset", NULL, NULL,
-            &lg_prof_sample_next, sizeof(size_t)), 0,
-            "Unexpected mallctl failure while resetting profile data");
-        sz = sizeof(size_t);
-        assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz,
-            NULL, 0), 0, "Unexpected mallctl failure while reading "
-            "profiling sample rate");
+        do_prof_reset(lg_prof_sample_next);
+        lg_prof_sample = get_lg_prof_sample();
         assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
             "Expected profile sample rate change");
         lg_prof_sample_next = lg_prof_sample_orig;
     }

     /* Make sure the test code restored prof.lg_sample. */
-    sz = sizeof(size_t);
-    assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
-        "Unexpected mallctl failure while reading profiling sample rate");
+    lg_prof_sample = get_lg_prof_sample();
     assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
         "Unexpected disagreement between \"opt.lg_prof_sample\" and "
         "\"prof.lg_sample\"");

@@ -88,15 +105,12 @@ prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)

 TEST_BEGIN(test_prof_reset_cleanup)
 {
-    bool active;
     void *p;
     prof_dump_header_t *prof_dump_header_orig;

     test_skip_if(!config_prof);

-    active = true;
-    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-        0, "Unexpected mallctl failure while activating profiling");
+    set_prof_active(true);

     assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
     p = mallocx(1, 0);

@@ -124,9 +138,7 @@ TEST_BEGIN(test_prof_reset_cleanup)
     dallocx(p, 0);
     assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");

-    active = false;
-    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-        0, "Unexpected mallctl failure while deactivating profiling");
+    set_prof_active(false);
 }
 TEST_END

@@ -182,7 +194,7 @@ thd_start(void *varg)

 TEST_BEGIN(test_prof_reset)
 {
-    bool active;
+    size_t lg_prof_sample_orig;
     thd_t thds[NTHREADS];
     unsigned thd_args[NTHREADS];
     unsigned i;

@@ -195,9 +207,10 @@ TEST_BEGIN(test_prof_reset)
         "Unexpected pre-existing tdata structures");
     tdata_count = prof_tdata_count();

-    active = true;
-    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-        0, "Unexpected mallctl failure while activating profiling");
+    lg_prof_sample_orig = get_lg_prof_sample();
+    do_prof_reset(5);
+    set_prof_active(true);

     for (i = 0; i < NTHREADS; i++) {
         thd_args[i] = i;

@@ -211,9 +224,9 @@ TEST_BEGIN(test_prof_reset)
     assert_zu_eq(prof_tdata_count(), tdata_count,
         "Unexpected remaining tdata structures");

-    active = false;
-    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
-        0, "Unexpected mallctl failure while deactivating profiling");
+    set_prof_active(false);
+    do_prof_reset(lg_prof_sample_orig);
 }
 TEST_END
 #undef NTHREADS

@@ -222,6 +235,58 @@ TEST_END
 #undef RESET_INTERVAL
 #undef DUMP_INTERVAL

+/* Test sampling at the same allocation site across resets. */
+#define NITER 10
+TEST_BEGIN(test_xallocx)
+{
+    size_t lg_prof_sample_orig;
+    unsigned i;
+    void *ptrs[NITER];
+
+    test_skip_if(!config_prof);
+
+    lg_prof_sample_orig = get_lg_prof_sample();
+    set_prof_active(true);
+
+    /* Reset profiling. */
+    do_prof_reset(0);
+
+    for (i = 0; i < NITER; i++) {
+        void *p;
+        size_t sz, nsz;
+
+        /* Reset profiling. */
+        do_prof_reset(0);
+
+        /* Allocate small object (which will be promoted). */
+        p = ptrs[i] = mallocx(1, 0);
+        assert_ptr_not_null(p, "Unexpected mallocx() failure");
+
+        /* Reset profiling. */
+        do_prof_reset(0);
+
+        /* Perform successful xallocx(). */
+        sz = sallocx(p, 0);
+        assert_zu_eq(xallocx(p, sz, 0, 0), sz,
+            "Unexpected xallocx() failure");
+
+        /* Perform unsuccessful xallocx(). */
+        nsz = nallocx(sz+1, 0);
+        assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
+            "Unexpected xallocx() success");
+    }
+
+    for (i = 0; i < NITER; i++) {
+        /* dallocx. */
+        dallocx(ptrs[i], 0);
+    }
+
+    set_prof_active(false);
+    do_prof_reset(lg_prof_sample_orig);
+}
+TEST_END
+#undef NITER
+
 int
 main(void)
 {

@@ -232,5 +297,6 @@ main(void)
     return (test(
         test_prof_reset_basic,
         test_prof_reset_cleanup,
-        test_prof_reset));
+        test_prof_reset,
+        test_xallocx));
 }
@@ -26,7 +26,7 @@ get_max_size_class(void)
 TEST_BEGIN(test_size_classes)
 {
     size_t size_class, max_size_class;
-    index_t index, max_index;
+    szind_t index, max_index;

     max_size_class = get_max_size_class();
     max_index = size2index(max_size_class);
@@ -42,7 +42,7 @@ TEST_BEGIN(test_stats_huge)
     size_t sz;
     int expected = config_stats ? 0 : ENOENT;

-    p = mallocx(arena_maxclass+1, 0);
+    p = mallocx(large_maxclass+1, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

     assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,

@@ -88,7 +88,7 @@ TEST_BEGIN(test_stats_arenas_summary)

     little = mallocx(SMALL_MAXCLASS, 0);
     assert_ptr_not_null(little, "Unexpected mallocx() failure");
-    large = mallocx(arena_maxclass, 0);
+    large = mallocx(large_maxclass, 0);
     assert_ptr_not_null(large, "Unexpected mallocx() failure");
     huge = mallocx(chunksize, 0);
     assert_ptr_not_null(huge, "Unexpected mallocx() failure");

@@ -200,7 +200,7 @@ TEST_BEGIN(test_stats_arenas_large)
     assert_d_eq(mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena)),
         0, "Unexpected mallctl() failure");

-    p = mallocx(arena_maxclass, 0);
+    p = mallocx(large_maxclass, 0);
     assert_ptr_not_null(p, "Unexpected mallocx() failure");

     assert_d_eq(mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)), 0,
@@ -56,9 +56,14 @@ static void *
 thd_start(void *arg)
 {
     data_t d = (data_t)(uintptr_t)arg;
+    void *p;

     assert_x_eq(*data_tsd_get(), DATA_INIT,
         "Initial tsd get should return initialization value");

+    p = malloc(1);
+    assert_ptr_not_null(p, "Unexpected malloc() failure");
+
     data_tsd_set(&d);
     assert_x_eq(*data_tsd_get(), d,
         "After tsd set, tsd get should return value that was set");

@@ -67,6 +72,7 @@ thd_start(void *arg)
     assert_x_eq(*data_tsd_get(), (data_t)(uintptr_t)arg,
         "Resetting local data should have no effect on tsd");

+    free(p);
     return (NULL);
 }
@@ -55,7 +55,7 @@ TEST_BEGIN(test_zero_large)
 {

     test_skip_if(!config_fill);
-    test_zero(SMALL_MAXCLASS+1, arena_maxclass);
+    test_zero(SMALL_MAXCLASS+1, large_maxclass);
 }
 TEST_END

@@ -63,7 +63,7 @@ TEST_BEGIN(test_zero_huge)
 {

     test_skip_if(!config_fill);
-    test_zero(arena_maxclass+1, chunksize*2);
+    test_zero(large_maxclass+1, chunksize*2);
 }
 TEST_END