parent ba5c709517
commit 17c021c177
--- a/INSTALL
+++ b/INSTALL
@@ -165,9 +165,8 @@ any of the following arguments (not a definitive list) to 'configure':
     normal jemalloc operation.
 
 --disable-fill
-    Disable support for junk/zero filling of memory and redzones.  See the
-    "opt.junk", "opt.zero", and "opt.redzone" option documentation for usage
-    details.
+    Disable support for junk/zero filling of memory.  See the "opt.junk" and
+    "opt.zero" option documentation for usage details.
 
 --disable-zone-allocator
     Disable zone allocator for Darwin.  This means jemalloc won't be hooked as
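The fill options that survive this change remain run-time configurable through
the application's malloc_conf string, the same mechanism the test files
further down in this commit use.  A minimal sketch (illustrative only, not
part of the commit):

    #include <stdlib.h>

    /* jemalloc reads this symbol once, before the first allocation. */
    const char *malloc_conf = "junk:true,zero:false";

    int
    main(void)
    {
        void *p = malloc(64);   /* junk-filled on allocation */

        free(p);                /* junk-filled again on deallocation */
        return (0);
    }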
--- a/configure.ac
+++ b/configure.ac
@@ -945,8 +945,7 @@ fi
 
 dnl Support the junk/zero filling option by default.
 AC_ARG_ENABLE([fill],
-  [AS_HELP_STRING([--disable-fill],
-  [Disable support for junk/zero filling and redzones])],
+  [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
 [if test "x$enable_fill" = "xno" ; then
   enable_fill="0"
 else
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -1040,21 +1040,6 @@ for (i = 0; i < nbins; i++) {
         default.</para></listitem>
       </varlistentry>
 
-      <varlistentry id="opt.redzone">
-        <term>
-          <mallctl>opt.redzone</mallctl>
-          (<type>bool</type>)
-          <literal>r-</literal>
-          [<option>--enable-fill</option>]
-        </term>
-        <listitem><para>Redzones enabled/disabled.  If enabled, small
-        allocations have redzones before and after them.  Furthermore, if the
-        <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link> option is
-        enabled, the redzones are checked for corruption during deallocation.
-        This option is intended for debugging and will impact performance
-        negatively.  This option is disabled by default.</para></listitem>
-      </varlistentry>
-
       <varlistentry id="opt.zero">
         <term>
           <mallctl>opt.zero</mallctl>
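The entry removed above was a read-only mallctl; after this change a lookup of
"opt.redzone" fails with ENOENT, while the remaining fill options stay
readable.  A sketch (not part of the commit):

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        bool zero;
        size_t sz = sizeof(zero);

        /* Read-only option: oldp/oldlenp are used, newp is NULL. */
        if (mallctl("opt.zero", &zero, &sz, NULL, 0) == 0)
            printf("opt.zero: %s\n", zero ? "true" : "false");
        return (0);
    }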
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -7,12 +7,6 @@
 #define LG_RUN_MAXREGS   (LG_PAGE - LG_TINY_MIN)
 #define RUN_MAXREGS      (1U << LG_RUN_MAXREGS)
 
-/*
- * Minimum redzone size.  Redzones may be larger than this if necessary to
- * preserve region alignment.
- */
-#define REDZONE_MINSIZE  16
-
 /*
  * The minimum ratio of active:dirty pages per arena is computed as:
  *
@@ -205,42 +199,22 @@ struct arena_chunk_s {
  *
  * Each run has the following layout:
  *
  *               /--------------------\
- *               | pad?               |
+ *               | region 0           |
  *               |--------------------|
- *               | redzone            |
- *   reg0_offset | region 0           |
- *               | redzone            |
- *               |--------------------| \
- *               | redzone            | |
- *               | region 1           |  > reg_interval
- *               | redzone            | /
- *               |--------------------|
- *               | ...                |
- *               | ...                |
- *               | ...                |
- *               |--------------------|
- *               | redzone            |
- *               | region nregs-1     |
- *               | redzone            |
- *               |--------------------|
- *               | alignment pad?     |
- *               \--------------------/
- *
- * reg_interval has at least the same minimum alignment as reg_size; this
- * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
- * either 0 or redzone_size; it is present only if needed to align reg0_offset.
+ *               | region 1           |
+ *               |--------------------|
+ *               | ...                |
+ *               | ...                |
+ *               | ...                |
+ *               |--------------------|
+ *               | region nregs-1     |
+ *               \--------------------/
  */
 struct arena_bin_info_s {
     /* Size of regions in a run for this bin's size class. */
     size_t reg_size;
 
-    /* Redzone size. */
-    size_t redzone_size;
-
-    /* Interval between regions (reg_size + (redzone_size << 1)). */
-    size_t reg_interval;
-
     /* Total size of a run for this bin's size class. */
     size_t run_size;
 
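The two layouts above imply different address arithmetic for region regind of
a run; a side-by-side sketch (base is a hypothetical stand-in for the run's
first data page, not jemalloc API):

    #include <stddef.h>
    #include <stdint.h>

    /* New layout: regions are packed back to back. */
    static inline void *
    region_addr_new(void *base, size_t reg_size, size_t regind)
    {
        return ((void *)((uintptr_t)base + reg_size * regind));
    }

    /*
     * Old layout: skip reg0_offset, then stride by reg_interval
     * (reg_size plus a leading and a trailing redzone).
     */
    static inline void *
    region_addr_old(void *base, size_t reg0_offset, size_t reg_interval,
        size_t regind)
    {
        return ((void *)((uintptr_t)base + reg0_offset +
            reg_interval * regind));
    }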
@@ -252,9 +226,6 @@ struct arena_bin_info_s {
      * bin.
      */
     bitmap_info_t bitmap_info;
-
-    /* Offset of first region in a run for this bin's size class. */
-    uint32_t reg0_offset;
 };
 
 struct arena_bin_s {
@@ -543,9 +514,6 @@ void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
 void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
-typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
-    uint8_t);
-extern arena_redzone_corruption_t *arena_redzone_corruption;
 typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 #else
@@ -1113,8 +1081,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
         assert(run_binind == actual_binind);
         bin_info = &arena_bin_info[actual_binind];
         rpages = arena_miscelm_to_rpages(miscelm);
-        assert(((uintptr_t)ptr - ((uintptr_t)rpages +
-            (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+        assert(((uintptr_t)ptr - (uintptr_t)rpages) % bin_info->reg_size
             == 0);
     }
 
@@ -1142,18 +1109,16 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
      * Freeing a pointer lower than region zero can cause assertion
      * failure.
      */
-    assert((uintptr_t)ptr >= (uintptr_t)rpages +
-        (uintptr_t)bin_info->reg0_offset);
+    assert((uintptr_t)ptr >= (uintptr_t)rpages);
 
     /*
      * Avoid doing division with a variable divisor if possible.  Using
      * actual division here can reduce allocator throughput by over 20%!
      */
-    diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
-        bin_info->reg0_offset);
+    diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages);
 
     /* Rescale (factor powers of 2 out of the numerator and denominator). */
-    interval = bin_info->reg_interval;
+    interval = bin_info->reg_size;
     shift = ffs_zu(interval) - 1;
     diff >>= shift;
     interval >>= shift;
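The rescaling above is worth spelling out: small size classes are multiples of
a power of two, so shifting that common factor out of both the pointer offset
and the divisor shrinks (and often eliminates) the division when computing a
region index.  A standalone sketch, with POSIX ffsl() standing in for
jemalloc's internal ffs_zu():

    #include <stddef.h>
    #include <strings.h>

    static size_t
    region_index(size_t diff, size_t interval)
    {
        /* Factor powers of 2 out of the numerator and denominator. */
        unsigned shift = (unsigned)ffsl((long)interval) - 1;

        diff >>= shift;
        interval >>= shift;

        if (interval == 1) {
            /* Power-of-2 divisor: the division disappears entirely. */
            return (diff);
        }
        /*
         * One divide remains; arena_run_regind() avoids even this by
         * multiplying by a precomputed inverse instead.
         */
        return (diff / interval);
    }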
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -423,7 +423,6 @@ extern bool opt_abort;
 extern const char *opt_junk;
 extern bool opt_junk_alloc;
 extern bool opt_junk_free;
-extern bool opt_redzone;
 extern bool opt_utrace;
 extern bool opt_xmalloc;
 extern bool opt_zero;
@@ -888,8 +887,6 @@ void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena);
 void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
 size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-size_t u2rz(size_t usize);
-size_t p2rz(tsdn_t *tsdn, const void *ptr);
 void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
     bool slow_path);
 void idalloc(tsd_t *tsd, void *ptr);
@@ -1011,28 +1008,6 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
     return (isalloc(tsdn, ptr, demote));
 }
 
-JEMALLOC_INLINE size_t
-u2rz(size_t usize)
-{
-    size_t ret;
-
-    if (usize <= SMALL_MAXCLASS) {
-        szind_t binind = size2index(usize);
-        ret = arena_bin_info[binind].redzone_size;
-    } else
-        ret = 0;
-
-    return (ret);
-}
-
-JEMALLOC_INLINE size_t
-p2rz(tsdn_t *tsdn, const void *ptr)
-{
-    size_t usize = isalloc(tsdn, ptr, false);
-
-    return (u2rz(usize));
-}
-
 JEMALLOC_ALWAYS_INLINE void
 idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
     bool slow_path)
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -142,7 +142,7 @@
  */
 #undef JEMALLOC_DSS
 
-/* Support memory filling (junk/zero/redzone). */
+/* Support memory filling (junk/zero). */
 #undef JEMALLOC_FILL
 
 /* Support utrace(2)-based tracing. */
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -103,7 +103,6 @@ arena_ralloc
 arena_ralloc_junk_large
 arena_ralloc_no_move
 arena_rd_to_miscelm
-arena_redzone_corruption
 arena_reset
 arena_run_regind
 arena_run_to_miscelm
@@ -382,13 +381,11 @@ opt_prof_leak
 opt_prof_prefix
 opt_prof_thread_active_init
 opt_purge
-opt_redzone
 opt_stats_print
 opt_tcache
 opt_utrace
 opt_xmalloc
 opt_zero
-p2rz
 pages_boot
 pages_commit
 pages_decommit
@@ -578,7 +575,6 @@ tsd_witnessesp_get
 tsdn_fetch
 tsdn_null
 tsdn_tsd
-u2rz
 witness_assert_lockless
 witness_assert_not_owner
 witness_assert_owner
--- a/src/arena.c
+++ b/src/arena.c
@@ -314,8 +314,8 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
     regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
     miscelm = arena_run_to_miscelm(run);
     rpages = arena_miscelm_to_rpages(miscelm);
-    ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
-        (uintptr_t)(bin_info->reg_interval * regind));
+    ret = (void *)((uintptr_t)rpages + (uintptr_t)(bin_info->reg_size *
+        regind));
     run->nfree--;
     return (ret);
 }
@@ -333,12 +333,10 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
     assert(run->nfree < bin_info->nregs);
     /* Freeing an interior pointer can cause assertion failure. */
     assert(((uintptr_t)ptr -
-        ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
-        (uintptr_t)bin_info->reg0_offset)) %
-        (uintptr_t)bin_info->reg_interval == 0);
+        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run))) %
+        (uintptr_t)bin_info->reg_size == 0);
     assert((uintptr_t)ptr >=
-        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
-        (uintptr_t)bin_info->reg0_offset);
+        (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)));
     /* Freeing an unallocated pointer can cause assertion failure. */
     assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
 
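The updated asserts encode a simpler invariant: a valid small-region pointer
is now exactly run_base + k * reg_size for some k in [0, nregs).  A
self-contained version of that check (hypothetical helper, not jemalloc API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    static bool
    region_ptr_valid(const void *ptr, const void *run_base,
        size_t reg_size, size_t nregs)
    {
        uintptr_t diff;

        if ((uintptr_t)ptr < (uintptr_t)run_base)
            return (false);     /* below region zero */
        diff = (uintptr_t)ptr - (uintptr_t)run_base;
        /* Interior pointers fail the modulus test. */
        return (diff % reg_size == 0 && diff / reg_size < nregs);
    }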
@@ -2395,73 +2393,8 @@ void
 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
 {
 
-    size_t redzone_size = bin_info->redzone_size;
-
-    if (zero) {
-        memset((void *)((uintptr_t)ptr - redzone_size),
-            JEMALLOC_ALLOC_JUNK, redzone_size);
-        memset((void *)((uintptr_t)ptr + bin_info->reg_size),
-            JEMALLOC_ALLOC_JUNK, redzone_size);
-    } else {
-        memset((void *)((uintptr_t)ptr - redzone_size),
-            JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
-    }
-}
-
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
-#endif
-static void
-arena_redzone_corruption(void *ptr, size_t usize, bool after,
-    size_t offset, uint8_t byte)
-{
-
-    malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
-        "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
-        after ? "after" : "before", ptr, usize, byte);
-}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
-arena_redzone_corruption_t *arena_redzone_corruption =
-    JEMALLOC_N(n_arena_redzone_corruption);
-#endif
-
-static void
-arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
-{
-    bool error = false;
-
-    if (opt_junk_alloc) {
-        size_t size = bin_info->reg_size;
-        size_t redzone_size = bin_info->redzone_size;
-        size_t i;
-
-        for (i = 1; i <= redzone_size; i++) {
-            uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
-            if (*byte != JEMALLOC_ALLOC_JUNK) {
-                error = true;
-                arena_redzone_corruption(ptr, size, false, i,
-                    *byte);
-                if (reset)
-                    *byte = JEMALLOC_ALLOC_JUNK;
-            }
-        }
-        for (i = 0; i < redzone_size; i++) {
-            uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
-            if (*byte != JEMALLOC_ALLOC_JUNK) {
-                error = true;
-                arena_redzone_corruption(ptr, size, true, i,
-                    *byte);
-                if (reset)
-                    *byte = JEMALLOC_ALLOC_JUNK;
-            }
-        }
-    }
-
-    if (opt_abort && error)
-        abort();
+    if (!zero)
+        memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
 }
 
 #ifdef JEMALLOC_JET
@@ -2471,11 +2404,8 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
 void
 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
 {
-    size_t redzone_size = bin_info->redzone_size;
 
-    arena_redzones_validate(ptr, bin_info, false);
-    memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
-        bin_info->reg_interval);
+    memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
 }
 #ifdef JEMALLOC_JET
 #undef arena_dalloc_junk_small
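With redzone validation gone, junk filling covers exactly the region: one
memset on allocation and one on deallocation.  A standalone illustration; the
0xa5/0x5a byte values mirror what jemalloc's JEMALLOC_ALLOC_JUNK and
JEMALLOC_FREE_JUNK constants are commonly defined as, and should be treated as
assumptions here:

    #include <stddef.h>
    #include <string.h>

    #define ALLOC_JUNK ((unsigned char)0xa5)    /* assumed value */
    #define FREE_JUNK  ((unsigned char)0x5a)    /* assumed value */

    static void
    junk_on_alloc(void *ptr, size_t reg_size, int zero)
    {
        if (!zero)
            memset(ptr, ALLOC_JUNK, reg_size);  /* the region only */
    }

    static void
    junk_on_free(void *ptr, size_t reg_size)
    {
        /* No surrounding redzones to validate or fill anymore. */
        memset(ptr, FREE_JUNK, reg_size);
    }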
@@ -3559,43 +3489,16 @@ arena_new(tsdn_t *tsdn, unsigned ind)
  * *) bin_info->run_size <= arena_maxrun
  * *) bin_info->nregs <= RUN_MAXREGS
  *
- * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
- * these settings are all interdependent.
+ * bin_info->nregs is also calculated here, since these settings are all
+ * interdependent.
  */
 static void
 bin_info_run_size_calc(arena_bin_info_t *bin_info)
 {
-    size_t pad_size;
     size_t try_run_size, perfect_run_size, actual_run_size;
     uint32_t try_nregs, perfect_nregs, actual_nregs;
 
-    /*
-     * Determine redzone size based on minimum alignment and minimum
-     * redzone size.  Add padding to the end of the run if it is needed to
-     * align the regions.  The padding allows each redzone to be half the
-     * minimum alignment; without the padding, each redzone would have to
-     * be twice as large in order to maintain alignment.
-     */
-    if (config_fill && unlikely(opt_redzone)) {
-        size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
-        if (align_min <= REDZONE_MINSIZE) {
-            bin_info->redzone_size = REDZONE_MINSIZE;
-            pad_size = 0;
-        } else {
-            bin_info->redzone_size = align_min >> 1;
-            pad_size = bin_info->redzone_size;
-        }
-    } else {
-        bin_info->redzone_size = 0;
-        pad_size = 0;
-    }
-    bin_info->reg_interval = bin_info->reg_size +
-        (bin_info->redzone_size << 1);
-
-    /*
-     * Compute run size under ideal conditions (no redzones, no limit on run
-     * size).
-     */
+    /* Compute smallest run size that is an integer multiple of reg_size. */
     try_run_size = PAGE;
     try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
     do {
@@ -3605,48 +3508,18 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info)
         try_run_size += PAGE;
         try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
     } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+    assert(perfect_run_size <= arena_maxrun);
     assert(perfect_nregs <= RUN_MAXREGS);
 
     actual_run_size = perfect_run_size;
-    actual_nregs = (uint32_t)((actual_run_size - pad_size) /
-        bin_info->reg_interval);
-
-    /*
-     * Redzones can require enough padding that not even a single region can
-     * fit within the number of pages that would normally be dedicated to a
-     * run for this size class.  Increase the run size until at least one
-     * region fits.
-     */
-    while (actual_nregs == 0) {
-        assert(config_fill && unlikely(opt_redzone));
-
-        actual_run_size += PAGE;
-        actual_nregs = (uint32_t)((actual_run_size - pad_size) /
-            bin_info->reg_interval);
-    }
-
-    /*
-     * Make sure that the run will fit within an arena chunk.
-     */
-    while (actual_run_size > arena_maxrun) {
-        actual_run_size -= PAGE;
-        actual_nregs = (uint32_t)((actual_run_size - pad_size) /
-            bin_info->reg_interval);
-    }
-    assert(actual_nregs > 0);
-    assert(actual_run_size == s2u(actual_run_size));
+    actual_nregs = (uint32_t)((actual_run_size) / bin_info->reg_size);
 
     /* Copy final settings. */
     bin_info->run_size = actual_run_size;
     bin_info->nregs = actual_nregs;
-    bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
-        bin_info->reg_interval) - pad_size + bin_info->redzone_size);
 
     if (actual_run_size > small_maxrun)
         small_maxrun = actual_run_size;
 
-    assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
-        * bin_info->reg_interval) + pad_size == bin_info->run_size);
 }
 
 static void
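The do/while search above finds the smallest run size that is simultaneously a
whole number of pages and a whole number of regions, i.e. effectively
lcm(PAGE, reg_size).  A standalone sketch (PAGE_SZ is an assumed 4 KiB
stand-in for jemalloc's PAGE):

    #include <stddef.h>

    #define PAGE_SZ ((size_t)4096)  /* assumption for illustration */

    static size_t
    perfect_run_size(size_t reg_size)
    {
        size_t try_run_size = PAGE_SZ;
        size_t try_nregs = try_run_size / reg_size;
        size_t run_size, nregs;

        do {
            run_size = try_run_size;
            nregs = try_nregs;
            try_run_size += PAGE_SZ;
            try_nregs = try_run_size / reg_size;
        } while (run_size != nregs * reg_size);

        return (run_size);  /* e.g. reg_size 48 -> 12288 (3 pages) */
    }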
--- a/src/ctl.c
+++ b/src/ctl.c
@@ -97,7 +97,6 @@ CTL_PROTO(opt_decay_time)
 CTL_PROTO(opt_stats_print)
 CTL_PROTO(opt_junk)
 CTL_PROTO(opt_zero)
-CTL_PROTO(opt_redzone)
 CTL_PROTO(opt_utrace)
 CTL_PROTO(opt_xmalloc)
 CTL_PROTO(opt_tcache)
@@ -272,7 +271,6 @@ static const ctl_named_node_t opt_node[] = {
     {NAME("stats_print"), CTL(opt_stats_print)},
     {NAME("junk"), CTL(opt_junk)},
     {NAME("zero"), CTL(opt_zero)},
-    {NAME("redzone"), CTL(opt_redzone)},
     {NAME("utrace"), CTL(opt_utrace)},
     {NAME("xmalloc"), CTL(opt_xmalloc)},
     {NAME("tcache"), CTL(opt_tcache)},
@@ -1279,7 +1277,6 @@ CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
 CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
 CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
 CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
 CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
 CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
 CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -35,7 +35,6 @@ bool opt_junk_free =
 #endif
     ;
 
-bool opt_redzone = false;
 bool opt_utrace = false;
 bool opt_xmalloc = false;
 bool opt_zero = false;
@@ -1040,16 +1039,15 @@ malloc_conf_init(void)
 
             CONF_HANDLE_BOOL(opt_abort, "abort", true)
             /*
-             * Chunks always require at least one header page,
-             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
-             * possibly an additional page in the presence of
-             * redzones.  In order to simplify options processing,
-             * use a conservative bound that accommodates all these
+             * Chunks always require at least one header page and as
+             * many as 2^(LG_SIZE_CLASS_GROUP+1) data pages.  In
+             * order to simplify options processing, use a
+             * conservative bound that accommodates all these
              * constraints.
              */
             CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
-                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
-                (sizeof(size_t) << 3) - 1, true)
+                LG_SIZE_CLASS_GROUP + 1, (sizeof(size_t) << 3) - 1,
+                true)
             if (strncmp("dss", k, klen) == 0) {
                 int i;
                 bool match = false;
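A worked instance of the new bound: with 4 KiB pages (LG_PAGE = 12) and
LG_SIZE_CLASS_GROUP = 2 (both assumptions here; they are platform- and
configuration-dependent), the smallest accepted lg_chunk drops from 16 in
fill-enabled builds to 15 for all builds:

    #include <stdio.h>

    #define LG_PAGE             12  /* assumed: 4 KiB pages */
    #define LG_SIZE_CLASS_GROUP 2   /* assumed configuration */

    int
    main(void)
    {
        /* Old: header page + data pages + possible redzone page. */
        int old_min = LG_PAGE + LG_SIZE_CLASS_GROUP + 2;
        /* New: header page + data pages only. */
        int new_min = LG_PAGE + LG_SIZE_CLASS_GROUP + 1;

        printf("lg_chunk lower bound: %d -> %d\n", old_min, new_min);
        return (0);
    }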
@@ -1124,7 +1122,6 @@ malloc_conf_init(void)
                 }
                 continue;
             }
-            CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
             CONF_HANDLE_BOOL(opt_zero, "zero", true)
         }
         if (config_utrace) {
--- a/src/stats.c
+++ b/src/stats.c
@@ -513,7 +513,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
     OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time)
     OPT_WRITE_BOOL(stats_print)
     OPT_WRITE_CHAR_P(junk)
-    OPT_WRITE_BOOL(redzone)
     OPT_WRITE_BOOL(zero)
     OPT_WRITE_BOOL(utrace)
     OPT_WRITE_BOOL(xmalloc)
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -5,7 +5,7 @@
 #  define JEMALLOC_TEST_JUNK_OPT "junk:true"
 # endif
 const char *malloc_conf =
-    "abort:false,zero:false,redzone:true," JEMALLOC_TEST_JUNK_OPT;
+    "abort:false,zero:false," JEMALLOC_TEST_JUNK_OPT;
 #endif
 
 static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig;
@@ -197,49 +197,6 @@ TEST_BEGIN(test_junk_large_ralloc_shrink)
 }
 TEST_END
 
-static bool detected_redzone_corruption;
-
-static void
-arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after,
-    size_t offset, uint8_t byte)
-{
-
-    detected_redzone_corruption = true;
-}
-
-TEST_BEGIN(test_junk_redzone)
-{
-    char *s;
-    arena_redzone_corruption_t *arena_redzone_corruption_orig;
-
-    test_skip_if(!config_fill);
-    test_skip_if(!opt_junk_alloc || !opt_junk_free);
-
-    arena_redzone_corruption_orig = arena_redzone_corruption;
-    arena_redzone_corruption = arena_redzone_corruption_replacement;
-
-    /* Test underflow. */
-    detected_redzone_corruption = false;
-    s = (char *)mallocx(1, 0);
-    assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
-    s[-1] = 0xbb;
-    dallocx(s, 0);
-    assert_true(detected_redzone_corruption,
-        "Did not detect redzone corruption");
-
-    /* Test overflow. */
-    detected_redzone_corruption = false;
-    s = (char *)mallocx(1, 0);
-    assert_ptr_not_null((void *)s, "Unexpected mallocx() failure");
-    s[sallocx(s, 0)] = 0xbb;
-    dallocx(s, 0);
-    assert_true(detected_redzone_corruption,
-        "Did not detect redzone corruption");
-
-    arena_redzone_corruption = arena_redzone_corruption_orig;
-}
-TEST_END
-
 int
 main(void)
 {
@@ -248,6 +205,5 @@ main(void)
     test_junk_small,
     test_junk_large,
     test_junk_huge,
-    test_junk_large_ralloc_shrink,
-        test_junk_redzone));
+    test_junk_large_ralloc_shrink));
 }
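The deleted test relied on jemalloc's JEMALLOC_JET hook pattern, which the
surviving junk tests in this file still use: save the function pointer, swap
in a replacement, exercise the allocator, then restore.  Schematically (a
sketch against the file's existing declarations, not buildable on its own):

    static arena_dalloc_junk_small_t *junk_small_orig;
    static bool junk_hook_ran;

    static void
    junk_small_replacement(void *ptr, arena_bin_info_t *bin_info)
    {

        junk_hook_ran = true;
        junk_small_orig(ptr, bin_info); /* keep normal junk filling */
    }

    /*
     * Inside a TEST_BEGIN/TEST_END body:
     *   junk_small_orig = arena_dalloc_junk_small;
     *   arena_dalloc_junk_small = junk_small_replacement;
     *   ...allocate and free a small object...
     *   assert_true(junk_hook_ran, "hook was not invoked");
     *   arena_dalloc_junk_small = junk_small_orig;
     */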
--- a/test/unit/mallctl.c
+++ b/test/unit/mallctl.c
@@ -168,7 +168,6 @@ TEST_BEGIN(test_mallctl_opt)
     TEST_MALLCTL_OPT(ssize_t, decay_time, always);
     TEST_MALLCTL_OPT(bool, stats_print, always);
    TEST_MALLCTL_OPT(const char *, junk, fill);
-    TEST_MALLCTL_OPT(bool, redzone, fill);
     TEST_MALLCTL_OPT(bool, zero, fill);
     TEST_MALLCTL_OPT(bool, utrace, utrace);
     TEST_MALLCTL_OPT(bool, xmalloc, xmalloc);
--- a/test/unit/zero.c
+++ b/test/unit/zero.c
@@ -2,7 +2,7 @@
 
 #ifdef JEMALLOC_FILL
 const char *malloc_conf =
-    "abort:false,junk:false,zero:true,redzone:false";
+    "abort:false,junk:false,zero:true";
 #endif
 
 static void