Remove redzone support.

This resolves #369.
Jason Evans
2016-04-05 18:18:15 -07:00
parent ba5c709517
commit 17c021c177
14 changed files with 41 additions and 301 deletions

include/jemalloc/internal/arena.h

@@ -7,12 +7,6 @@
 #define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
 #define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
 
-/*
- * Minimum redzone size.  Redzones may be larger than this if necessary to
- * preserve region alignment.
- */
-#define REDZONE_MINSIZE 16
-
 /*
  * The minimum ratio of active:dirty pages per arena is computed as:
  *
@@ -205,42 +199,22 @@ struct arena_chunk_s {
  *
  * Each run has the following layout:
  *
- *               /--------------------\
- *               | pad?               |
- *               |--------------------|
- *               | redzone            |
- *   reg0_offset | region 0           |
- *               | redzone            |
- *               |--------------------| \
- *               | redzone            | |
- *               | region 1           |  > reg_interval
- *               | redzone            | /
- *               |--------------------|
- *               | ...                |
- *               | ...                |
- *               | ...                |
- *               |--------------------|
- *               | redzone            |
- *               | region nregs-1     |
- *               | redzone            |
- *               |--------------------|
- *               | alignment pad?     |
- *               \--------------------/
- *
- * reg_interval has at least the same minimum alignment as reg_size; this
- * preserves the alignment constraint that sa2u() depends on.  Alignment pad is
- * either 0 or redzone_size; it is present only if needed to align reg0_offset.
+ * /--------------------\
+ * | region 0           |
+ * |--------------------|
+ * | region 1           |
+ * |--------------------|
+ * | ...                |
+ * | ...                |
+ * | ...                |
+ * |--------------------|
+ * | region nregs-1     |
+ * \--------------------/
  */
 struct arena_bin_info_s {
 	/* Size of regions in a run for this bin's size class. */
 	size_t		reg_size;
 
-	/* Redzone size. */
-	size_t		redzone_size;
-
-	/* Interval between regions (reg_size + (redzone_size << 1)). */
-	size_t		reg_interval;
-
 	/* Total size of a run for this bin's size class. */
 	size_t		run_size;
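With redzones gone, runs are densely packed: region i simply starts i * reg_size bytes into the run's pages, whereas the old layout placed region 0 at reg0_offset and spaced regions reg_interval = reg_size + (redzone_size << 1) bytes apart. A standalone sketch of the two addressing schemes, with hypothetical helper names that are not part of this commit:

#include <stddef.h>
#include <stdint.h>

/* Old layout: region 0 sits at reg0_offset (past its leading redzone);
 * consecutive regions are reg_interval bytes apart. */
static void *
region_addr_old(void *rpages, uint32_t reg0_offset, size_t reg_interval,
    size_t i)
{
	return ((void *)((uintptr_t)rpages + reg0_offset + i * reg_interval));
}

/* New layout: regions are contiguous, so only reg_size matters. */
static void *
region_addr_new(void *rpages, size_t reg_size, size_t i)
{
	return ((void *)((uintptr_t)rpages + i * reg_size));
}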
@@ -252,9 +226,6 @@ struct arena_bin_info_s {
 	 * bin.
 	 */
 	bitmap_info_t	bitmap_info;
-
-	/* Offset of first region in a run for this bin's size class. */
-	uint32_t	reg0_offset;
 };
 
 struct arena_bin_s {
@@ -543,9 +514,6 @@ void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
 void	arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
-typedef void	(arena_redzone_corruption_t)(void *, size_t, bool, size_t,
-    uint8_t);
-extern arena_redzone_corruption_t *arena_redzone_corruption;
 typedef void	(arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 #else
@@ -1113,8 +1081,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 		assert(run_binind == actual_binind);
 		bin_info = &arena_bin_info[actual_binind];
 		rpages = arena_miscelm_to_rpages(miscelm);
-		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
-		    (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
-		    == 0);
+		assert(((uintptr_t)ptr - (uintptr_t)rpages) % bin_info->reg_size
+		    == 0);
 	}
@@ -1142,18 +1109,16 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 	 * Freeing a pointer lower than region zero can cause assertion
 	 * failure.
 	 */
-	assert((uintptr_t)ptr >= (uintptr_t)rpages +
-	    (uintptr_t)bin_info->reg0_offset);
+	assert((uintptr_t)ptr >= (uintptr_t)rpages);
 
 	/*
 	 * Avoid doing division with a variable divisor if possible.  Using
 	 * actual division here can reduce allocator throughput by over 20%!
 	 */
-	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages -
-	    bin_info->reg0_offset);
+	diff = (size_t)((uintptr_t)ptr - (uintptr_t)rpages);
 
 	/* Rescale (factor powers of 2 out of the numerator and denominator). */
-	interval = bin_info->reg_interval;
+	interval = bin_info->reg_size;
 	shift = ffs_zu(interval) - 1;
 	diff >>= shift;
 	interval >>= shift;
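The arena_run_regind() hunk above keeps jemalloc's trick for avoiding division by a variable divisor: the common power-of-two factor is first shifted out of both the byte offset and the region size, so power-of-two size classes need no division at all. A standalone sketch of the idea (not part of this diff), using the GCC/Clang builtin __builtin_ctzl in place of jemalloc's ffs_zu, and a plain division where the real code finishes with a precomputed magic-number multiply:

#include <assert.h>
#include <stddef.h>

/* Map a byte offset within a run to a region index (sketch only). */
static size_t
run_regind(size_t diff, size_t reg_size)
{
	/* Factor powers of 2 out of the numerator and denominator. */
	unsigned shift = (unsigned)__builtin_ctzl(reg_size);

	diff >>= shift;
	reg_size >>= shift;

	if (reg_size == 1) {
		/* Power-of-two size class: the shift did all the work. */
		return (diff);
	}
	/* An odd divisor remains; jemalloc replaces this division with a
	 * precomputed multiply-and-shift. */
	return (diff / reg_size);
}

int
main(void)
{
	assert(run_regind(96, 48) == 2);	/* 48-byte regions: byte 96 is region 2 */
	assert(run_regind(192, 64) == 3);	/* pure shift, no division */
	return (0);
}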

include/jemalloc/internal/jemalloc_internal.h.in

@@ -423,7 +423,6 @@ extern bool opt_abort;
 extern const char	*opt_junk;
 extern bool	opt_junk_alloc;
 extern bool	opt_junk_free;
-extern bool	opt_redzone;
 extern bool	opt_utrace;
 extern bool	opt_xmalloc;
 extern bool	opt_zero;
@@ -888,8 +887,6 @@ void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena);
 void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
 size_t	ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
-size_t	u2rz(size_t usize);
-size_t	p2rz(tsdn_t *tsdn, const void *ptr);
 void	idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
     bool slow_path);
 void	idalloc(tsd_t *tsd, void *ptr);
@@ -1011,28 +1008,6 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
 	return (isalloc(tsdn, ptr, demote));
 }
 
-JEMALLOC_INLINE size_t
-u2rz(size_t usize)
-{
-	size_t ret;
-
-	if (usize <= SMALL_MAXCLASS) {
-		szind_t binind = size2index(usize);
-		ret = arena_bin_info[binind].redzone_size;
-	} else
-		ret = 0;
-
-	return (ret);
-}
-
-JEMALLOC_INLINE size_t
-p2rz(tsdn_t *tsdn, const void *ptr)
-{
-	size_t usize = isalloc(tsdn, ptr, false);
-
-	return (u2rz(usize));
-}
-
 JEMALLOC_ALWAYS_INLINE void
 idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
     bool slow_path)

include/jemalloc/internal/jemalloc_internal_defs.h.in

@@ -142,7 +142,7 @@
  */
 #undef JEMALLOC_DSS
 
-/* Support memory filling (junk/zero/redzone). */
+/* Support memory filling (junk/zero). */
 #undef JEMALLOC_FILL
 
 /* Support utrace(2)-based tracing. */

include/jemalloc/internal/private_symbols.txt

@@ -103,7 +103,6 @@ arena_ralloc
 arena_ralloc_junk_large
 arena_ralloc_no_move
 arena_rd_to_miscelm
-arena_redzone_corruption
 arena_reset
 arena_run_regind
 arena_run_to_miscelm
@@ -382,13 +381,11 @@ opt_prof_leak
 opt_prof_prefix
 opt_prof_thread_active_init
 opt_purge
-opt_redzone
 opt_stats_print
 opt_tcache
 opt_utrace
 opt_xmalloc
 opt_zero
-p2rz
 pages_boot
 pages_commit
 pages_decommit
@@ -578,7 +575,6 @@ tsd_witnessesp_get
 tsdn_fetch
 tsdn_null
 tsdn_tsd
-u2rz
 witness_assert_lockless
 witness_assert_not_owner
 witness_assert_owner