Implement Valgrind support, redzones, and quarantine.

Implement Valgrind support, as well as the redzone and quarantine
features, which help Valgrind detect memory errors.  Redzones are only
implemented for small objects because the changes necessary to support
redzones around large and huge objects are complicated by in-place
reallocation, to the point that it isn't clear that the maintenance
burden is worth the incremental improvement to Valgrind support.

Merge arena_salloc() and arena_salloc_demote().

Refactor i[v]salloc() to expose the 'demote' option.
commit 122449b073
parent a1ee7838e1
Author: Jason Evans
Date:   2012-04-06 00:35:09 -07:00

20 changed files with 840 additions and 170 deletions
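
For orientation before the diff: with redzones enabled, every small region is
bracketed by a redzone on each side, and the stride between regions becomes
reg_interval rather than reg_size. A rough layout sketch (field names are from
this commit; the diagram itself is illustrative, not taken from the sources):

/*
 * Small-object run, redzones enabled (illustrative):
 *
 *   | header ... bitmap ... ctx0 | rz | reg 0 | rz | rz | reg 1 | rz | ... | pad |
 *                                     ^
 *                                     reg0_offset
 *
 *   reg_interval    == reg_size + 2 * redzone_size
 *   redzone0_offset == reg0_offset - redzone_size
 */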

src/arena.c

@@ -140,7 +140,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
-(uintptr_t)(bin_info->reg_size * regind));
+(uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
if (regind == run->nextind)
run->nextind++;
@@ -161,8 +161,8 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - ((uintptr_t)run +
-(uintptr_t)bin_info->reg0_offset)) % (uintptr_t)bin_info->reg_size
-== 0);
+(uintptr_t)bin_info->reg0_offset)) %
+(uintptr_t)bin_info->reg_interval == 0);
assert((uintptr_t)ptr >= (uintptr_t)run +
(uintptr_t)bin_info->reg0_offset);
/* Freeing an unallocated pointer can cause assertion failure. */
@@ -260,10 +260,18 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
for (i = 0; i < need_pages; i++) {
if ((chunk->map[run_ind+i-map_bias].bits
& CHUNK_MAP_UNZEROED) != 0) {
+VALGRIND_MAKE_MEM_UNDEFINED(
+(void *)((uintptr_t)
+chunk + ((run_ind+i) <<
+LG_PAGE)), PAGE);
memset((void *)((uintptr_t)
chunk + ((run_ind+i) <<
LG_PAGE)), 0, PAGE);
} else if (config_debug) {
+VALGRIND_MAKE_MEM_DEFINED(
+(void *)((uintptr_t)
+chunk + ((run_ind+i) <<
+LG_PAGE)), PAGE);
arena_chunk_validate_zeroed(
chunk, run_ind+i);
}
@@ -273,6 +281,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* The run is dirty, so all pages must be
* zeroed.
*/
+VALGRIND_MAKE_MEM_UNDEFINED((void
+*)((uintptr_t)chunk + (run_ind <<
+LG_PAGE)), (need_pages << LG_PAGE));
memset((void *)((uintptr_t)chunk + (run_ind <<
LG_PAGE)), 0, (need_pages << LG_PAGE));
}
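
The VALGRIND_MAKE_MEM_* calls above are the standard memcheck client requests
from <valgrind/memcheck.h>; they expand to no-ops when the process is not
running under Valgrind. A minimal standalone illustration of their semantics
(not jemalloc code):

#include <stdlib.h>
#include <string.h>
#include <valgrind/memcheck.h>

int
main(void)
{
	char *p = malloc(16);

	/* Declare the bytes garbage: memcheck flags any read of them. */
	VALGRIND_MAKE_MEM_UNDEFINED(p, 16);
	memset(p, 0, 16);
	/* Declare the bytes valid again, matching the memset() contents. */
	VALGRIND_MAKE_MEM_DEFINED(p, 16);
	free(p);
	return (0);
}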
@@ -1245,6 +1256,10 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL)
break;
+if (config_fill && opt_junk) {
+arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+true);
+}
/* Insert such that low regions get used first. */
tbin->avail[nfill - 1 - i] = ptr;
}
@@ -1259,6 +1274,55 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
tbin->ncached = i;
}
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+if (zero) {
+size_t redzone_size = bin_info->redzone_size;
+memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+redzone_size);
+memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+redzone_size);
+} else {
+memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+bin_info->reg_interval);
+}
+}
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+size_t size = bin_info->reg_size;
+size_t redzone_size = bin_info->redzone_size;
+size_t i;
+bool error = false;
+for (i = 1; i <= redzone_size; i++) {
+unsigned byte;
+if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+error = true;
+malloc_printf("<jemalloc>: Corrupt redzone "
+"%zu byte%s before %p (size %zu), byte=%#x\n", i,
+(i == 1) ? "" : "s", ptr, size, byte);
+}
+}
+for (i = 0; i < redzone_size; i++) {
+unsigned byte;
+if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+error = true;
+malloc_printf("<jemalloc>: Corrupt redzone "
+"%zu byte%s after end of %p (size %zu), byte=%#x\n",
+i, (i == 1) ? "" : "s", ptr, size, byte);
+}
+}
+if (opt_abort && error)
+abort();
+memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+bin_info->reg_interval);
+}
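
Taken together, the two routines added above implement the redzone protocol:
arena_alloc_junk_small() paints the redzones (and, for non-zeroed junk fill,
the region itself) with 0xa5, and arena_dalloc_junk_small() verifies the 0xa5
canaries at deallocation time, then repaints the whole interval with 0x5a. A
hypothetical repro of the diagnostic, assuming an --enable-fill build and the
MALLOC_CONF runtime option syntax of this era:

/* cc -o overflow overflow.c -ljemalloc */
/* MALLOC_CONF=junk:true,redzone:true ./overflow */
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char *p = malloc(16);	/* Exactly fills the 16-byte size class. */

	memset(p, 'x', 17);	/* One byte into the trailing redzone. */
	free(p);		/* Expected: a "<jemalloc>: Corrupt redzone
				 * ... after end of ..." report once the
				 * region is really deallocated (the tcache
				 * may defer this), plus abort() if
				 * opt_abort is enabled. */
	return (0);
}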
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
@@ -1297,13 +1361,20 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
if (zero == false) {
if (config_fill) {
-if (opt_junk)
-memset(ret, 0xa5, size);
-else if (opt_zero)
+if (opt_junk) {
+arena_alloc_junk_small(ret,
+&arena_bin_info[binind], false);
+} else if (opt_zero)
memset(ret, 0, size);
}
-} else
+} else {
+if (config_fill && opt_junk) {
+arena_alloc_junk_small(ret, &arena_bin_info[binind],
+true);
+}
+VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+}
return (ret);
}
@@ -1412,7 +1483,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
/* Return the size of the allocation pointed to by ptr. */
size_t
-arena_salloc(const void *ptr)
+arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@@ -1431,12 +1502,19 @@ arena_salloc(const void *ptr)
size_t binind = arena_bin_index(chunk->arena, run->bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
assert(((uintptr_t)ptr - ((uintptr_t)run +
-(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
-0);
+(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval
+== 0);
ret = bin_info->reg_size;
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = mapbits & ~PAGE_MASK;
+if (demote && prof_promote && ret == PAGE && (mapbits &
+CHUNK_MAP_CLASS_MASK) != 0) {
+size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
+CHUNK_MAP_CLASS_SHIFT) - 1;
+assert(binind < NBINS);
+ret = arena_bin_info[binind].reg_size;
+}
assert(ret != 0);
}
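
The new demote flag decides whether a profiling-promoted small allocation is
reported at its true small size class (demote == true) or at the promoted
PAGE size. The refactored i[v]salloc() wrappers mentioned in the commit
message presumably reduce to something like the following sketch (the real
definitions live in the internal headers, not in this hunk):

JEMALLOC_INLINE size_t
isalloc(const void *ptr, bool demote)
{
	size_t ret;
	arena_chunk_t *chunk;

	assert(ptr != NULL);
	/* Demotion only makes sense if profiling is enabled. */
	assert(config_prof || demote == false);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		ret = arena_salloc(ptr, demote);	/* Arena-managed. */
	else
		ret = huge_salloc(ptr);			/* Huge object. */
	return (ret);
}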
@@ -1449,9 +1527,11 @@ arena_prof_promoted(const void *ptr, size_t size)
arena_chunk_t *chunk;
size_t pageind, binind;
assert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
-assert(isalloc(ptr) == PAGE);
+assert(isalloc(ptr, false) == PAGE);
+assert(isalloc(ptr, true) == PAGE);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1460,45 +1540,9 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(binind < NBINS);
chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
-}
-size_t
-arena_salloc_demote(const void *ptr)
-{
-size_t ret;
-arena_chunk_t *chunk;
-size_t pageind, mapbits;
-assert(ptr != NULL);
-assert(CHUNK_ADDR2BASE(ptr) != ptr);
-chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-mapbits = chunk->map[pageind-map_bias].bits;
-assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-if ((mapbits & CHUNK_MAP_LARGE) == 0) {
-arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-(uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
-size_t binind = arena_bin_index(chunk->arena, run->bin);
-arena_bin_info_t *bin_info = &arena_bin_info[binind];
-assert(((uintptr_t)ptr - ((uintptr_t)run +
-(uintptr_t)bin_info->reg0_offset)) % bin_info->reg_size ==
-0);
-ret = bin_info->reg_size;
-} else {
-assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-ret = mapbits & ~PAGE_MASK;
-if (prof_promote && ret == PAGE && (mapbits &
-CHUNK_MAP_CLASS_MASK) != 0) {
-size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
-CHUNK_MAP_CLASS_SHIFT) - 1;
-assert(binind < NBINS);
-ret = arena_bin_info[binind].reg_size;
-}
-assert(ret != 0);
-}
-return (ret);
+assert(isalloc(ptr, false) == PAGE);
+assert(isalloc(ptr, true) == size);
}
static void
@@ -1545,7 +1589,8 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
past = (size_t)(PAGE_CEILING((uintptr_t)run +
(uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
-bin_info->reg_size) - (uintptr_t)chunk) >> LG_PAGE);
+bin_info->reg_interval - bin_info->redzone_size) -
+(uintptr_t)chunk) >> LG_PAGE);
malloc_mutex_lock(&arena->lock);
/*
@@ -1617,7 +1662,7 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size = bin_info->reg_size;
if (config_fill && opt_junk)
-memset(ptr, 0x5a, size);
+arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
@@ -1936,7 +1981,7 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
-idalloc(ptr);
+iqalloc(ptr);
return (ret);
}
@@ -2007,16 +2052,40 @@ arena_new(arena_t *arena, unsigned ind)
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
+size_t pad_size;
size_t try_run_size, good_run_size;
uint32_t try_nregs, good_nregs;
uint32_t try_hdr_size, good_hdr_size;
uint32_t try_bitmap_offset, good_bitmap_offset;
uint32_t try_ctx0_offset, good_ctx0_offset;
-uint32_t try_reg0_offset, good_reg0_offset;
+uint32_t try_redzone0_offset, good_redzone0_offset;
assert(min_run_size >= PAGE);
assert(min_run_size <= arena_maxclass);
+/*
+* Determine redzone size based on minimum alignment and minimum
+* redzone size. Add padding to the end of the run if it is needed to
+* align the regions. The padding allows each redzone to be half the
+* minimum alignment; without the padding, each redzone would have to
+* be twice as large in order to maintain alignment.
+*/
+if (config_fill && opt_redzone) {
+size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+if (align_min <= REDZONE_MINSIZE) {
+bin_info->redzone_size = REDZONE_MINSIZE;
+pad_size = 0;
+} else {
+bin_info->redzone_size = align_min >> 1;
+pad_size = bin_info->redzone_size;
+}
+} else {
+bin_info->redzone_size = 0;
+pad_size = 0;
+}
+bin_info->reg_interval = bin_info->reg_size +
+(bin_info->redzone_size << 1);
/*
* Calculate known-valid settings before entering the run_size
* expansion loop, so that the first part of the loop always copies
@@ -2028,7 +2097,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* header's mask length and the number of regions.
*/
try_run_size = min_run_size;
-try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin_info->reg_size)
+try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+bin_info->reg_interval)
+ 1; /* Counter-act try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
@@ -2050,9 +2120,9 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
} else
try_ctx0_offset = 0;
-try_reg0_offset = try_run_size - (try_nregs *
-bin_info->reg_size);
-} while (try_hdr_size > try_reg0_offset);
+try_redzone0_offset = try_run_size - (try_nregs *
+bin_info->reg_interval) - pad_size;
+} while (try_hdr_size > try_redzone0_offset);
/* run_size expansion loop. */
do {
@@ -2064,12 +2134,12 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
good_hdr_size = try_hdr_size;
good_bitmap_offset = try_bitmap_offset;
good_ctx0_offset = try_ctx0_offset;
-good_reg0_offset = try_reg0_offset;
+good_redzone0_offset = try_redzone0_offset;
/* Try more aggressive settings. */
try_run_size += PAGE;
-try_nregs = ((try_run_size - sizeof(arena_run_t)) /
-bin_info->reg_size)
+try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
+bin_info->reg_interval)
+ 1; /* Counter-act try_nregs-- in loop. */
if (try_nregs > RUN_MAXREGS) {
try_nregs = RUN_MAXREGS
@@ -2093,23 +2163,27 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
try_hdr_size += try_nregs *
sizeof(prof_ctx_t *);
}
-try_reg0_offset = try_run_size - (try_nregs *
-bin_info->reg_size);
-} while (try_hdr_size > try_reg0_offset);
+try_redzone0_offset = try_run_size - (try_nregs *
+bin_info->reg_interval) - pad_size;
+} while (try_hdr_size > try_redzone0_offset);
} while (try_run_size <= arena_maxclass
&& try_run_size <= arena_maxclass
-&& RUN_MAX_OVRHD * (bin_info->reg_size << 3) > RUN_MAX_OVRHD_RELAX
-&& (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
+&& RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
+RUN_MAX_OVRHD_RELAX
+&& (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
&& try_nregs < RUN_MAXREGS);
-assert(good_hdr_size <= good_reg0_offset);
+assert(good_hdr_size <= good_redzone0_offset);
/* Copy final settings. */
bin_info->run_size = good_run_size;
bin_info->nregs = good_nregs;
bin_info->bitmap_offset = good_bitmap_offset;
bin_info->ctx0_offset = good_ctx0_offset;
-bin_info->reg0_offset = good_reg0_offset;
+bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+* bin_info->reg_interval) + pad_size == bin_info->run_size);
return (good_run_size);
}
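
As a concrete check of the sizing logic above, consider a bin with reg_size
== 96 and the usual REDZONE_MINSIZE of 16 (assumed here). This throwaway
program mirrors the opt_redzone branch:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define	REDZONE_MINSIZE	16	/* Assumed to match the internal constant. */

int
main(void)
{
	size_t reg_size = 96;
	/* Minimum alignment guaranteed by the size class: 1 << (ffs - 1). */
	size_t align_min = (size_t)1 << (ffs((int)reg_size) - 1);
	size_t redzone_size = (align_min <= REDZONE_MINSIZE) ?
	    REDZONE_MINSIZE : (align_min >> 1);
	size_t pad_size = (align_min <= REDZONE_MINSIZE) ? 0 : redzone_size;
	size_t reg_interval = reg_size + (redzone_size << 1);

	/* Prints: align_min=32 redzone=16 pad=16 interval=128 */
	printf("align_min=%zu redzone=%zu pad=%zu interval=%zu\n",
	    align_min, redzone_size, pad_size, reg_interval);
	return (0);
}

Each 96-byte region keeps its 32-byte alignment with a 16-byte redzone on
each side, at the cost of one trailing 16-byte pad per run.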

src/ctl.c

@@ -57,6 +57,7 @@ CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_lg_chunk)
@@ -65,7 +66,10 @@ CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
@@ -179,6 +183,7 @@ static const ctl_node_t config_node[] = {
{NAME("tcache"), CTL(config_tcache)},
{NAME("tls"), CTL(config_tls)},
{NAME("utrace"), CTL(config_utrace)},
{NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
@@ -190,7 +195,10 @@ static const ctl_node_t opt_node[] = {
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("quarantine"), CTL(opt_quarantine)},
{NAME("redzone"), CTL(opt_redzone)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("valgrind"), CTL(opt_valgrind)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
{NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
@@ -1050,7 +1058,8 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Set new arena association. */
if (config_tcache) {
tcache_t *tcache;
-if ((tcache = *tcache_tsd_get()) != NULL) {
+if ((uintptr_t)(tcache = *tcache_tsd_get()) >
+(uintptr_t)TCACHE_STATE_MAX) {
tcache_arena_dissociate(tcache);
tcache_arena_associate(tcache, arena);
}
@@ -1085,6 +1094,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
@@ -1096,7 +1106,10 @@ CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
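
The new opt.* and config.* nodes are readable through the usual mallctl()
interface. A quick sketch (assuming the stock public API and default prefix):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t quarantine, sz = sizeof(quarantine);
	bool redzone;

	if (mallctl("opt.quarantine", &quarantine, &sz, NULL, 0) == 0)
		printf("opt.quarantine: %zu bytes\n", quarantine);
	sz = sizeof(redzone);
	if (mallctl("opt.redzone", &redzone, &sz, NULL, 0) == 0)
		printf("opt.redzone: %s\n", redzone ? "true" : "false");
	return (0);
}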

src/huge.c

@@ -174,7 +174,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
-idalloc(ptr);
+iqalloc(ptr);
}
return (ret);
}

src/jemalloc.c

@@ -14,14 +14,19 @@ const char *je_malloc_conf JEMALLOC_ATTR(visibility("default"));
bool opt_abort = true;
# ifdef JEMALLOC_FILL
bool opt_junk = true;
+bool opt_redzone = true;
# else
bool opt_junk = false;
+bool opt_redzone = false;
# endif
#else
bool opt_abort = false;
bool opt_junk = false;
+bool opt_redzone = false;
#endif
+size_t opt_quarantine = ZU(0);
bool opt_utrace = false;
+bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
size_t opt_narenas = 0;
@@ -419,7 +424,7 @@ malloc_conf_init(void)
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
-#define CONF_HANDLE_BOOL(o, n) \
+#define CONF_HANDLE_BOOL_HIT(o, n, hit) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
@@ -433,12 +438,19 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} \
+hit = true; \
+} else \
+hit = false;
+#define CONF_HANDLE_BOOL(o, n) { \
+bool hit; \
+CONF_HANDLE_BOOL_HIT(o, n, hit); \
+if (hit) \
+continue; \
+}
#define CONF_HANDLE_SIZE_T(o, n, min, max) \
if (sizeof(#n)-1 == klen && strncmp(#n, k, \
klen) == 0) { \
uintmax_t um; \
char *end; \
\
errno = 0; \
@@ -502,11 +514,30 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_stats_print, stats_print)
if (config_fill) {
CONF_HANDLE_BOOL(opt_junk, junk)
+CONF_HANDLE_SIZE_T(opt_quarantine, quarantine,
+0, SIZE_T_MAX)
+CONF_HANDLE_BOOL(opt_redzone, redzone)
CONF_HANDLE_BOOL(opt_zero, zero)
}
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, utrace)
}
+if (config_valgrind) {
+bool hit;
+CONF_HANDLE_BOOL_HIT(opt_valgrind,
+valgrind, hit)
+if (config_fill && opt_valgrind && hit) {
+opt_junk = false;
+opt_zero = false;
+if (opt_quarantine == 0) {
+opt_quarantine =
+JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+}
+opt_redzone = true;
+}
+if (hit)
+continue;
+}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, xmalloc)
}
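
One subtlety in the block above: options are handled strictly in the order
they appear in MALLOC_CONF, and valgrind:true rewrites opt_junk, opt_zero,
opt_redzone, and (if unset) opt_quarantine at parse time. Later options
therefore still win (illustrative; spellings per the handlers above):

/*
 * MALLOC_CONF="valgrind:true"            -> junk off, redzone on,
 *                                           quarantine = Valgrind default
 * MALLOC_CONF="valgrind:true,junk:true"  -> junk re-enabled afterwards
 * MALLOC_CONF="junk:true,valgrind:true"  -> junk off again, because the
 *                                           valgrind handler runs last
 */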
@@ -662,6 +693,11 @@ malloc_init_hard(void)
return (true);
}
+if (config_fill && quarantine_boot()) {
+malloc_mutex_unlock(&init_lock);
+return (true);
+}
if (config_prof && prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
@@ -763,7 +799,7 @@ je_malloc(size_t size)
} else
ret = imalloc(size);
} else {
-if (config_stats)
+if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
ret = imalloc(size);
}
@@ -780,10 +816,11 @@ label_oom:
if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
-assert(usize == isalloc(ret));
+assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, ret);
+JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
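
JEMALLOC_VALGRIND_MALLOC presumably wraps memcheck's malloc-like-block client
request and compiles away when Valgrind support is disabled; a sketch of the
shape such a macro takes (the real definition lives in the internal headers,
not in this hunk):

#define	JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {		\
	if (config_valgrind && opt_valgrind && (cond)) {		\
		VALGRIND_MALLOCLIKE_BLOCK((ptr), (usize), p2rz(ptr),	\
		    (zero));						\
	}								\
} while (0)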
@@ -872,7 +909,7 @@ imemalign(void **memptr, size_t alignment, size_t size,
label_return:
if (config_stats && result != NULL) {
-assert(usize == isalloc(result));
+assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
if (config_prof && opt_prof && result != NULL)
@@ -886,8 +923,10 @@ JEMALLOC_ATTR(visibility("default"))
int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
-return imemalign(memptr, alignment, size, sizeof(void *));
+int ret = imemalign(memptr, alignment, size, sizeof(void *));
+JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+config_prof), false);
+return (ret);
}
JEMALLOC_ATTR(malloc)
@@ -902,6 +941,8 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
errno = err;
}
+JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+false);
return (ret);
}
@@ -956,7 +997,7 @@ je_calloc(size_t num, size_t size)
} else
ret = icalloc(num_size);
} else {
-if (config_stats)
+if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(num_size);
ret = icalloc(num_size);
}
@@ -974,10 +1015,11 @@ label_return:
if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
-assert(usize == isalloc(ret));
+assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, num_size, ret);
+JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
@@ -988,19 +1030,30 @@ je_realloc(void *ptr, size_t size)
void *ret;
size_t usize;
size_t old_size = 0;
+size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
if (size == 0) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(p). */
-if (config_prof || config_stats)
-old_size = isalloc(ptr);
+if (config_prof) {
+old_size = isalloc(ptr, true);
+if (config_valgrind && opt_valgrind)
+old_rzsize = p2rz(ptr);
+} else if (config_stats) {
+old_size = isalloc(ptr, false);
+if (config_valgrind && opt_valgrind)
+old_rzsize = u2rz(old_size);
+} else if (config_valgrind && opt_valgrind) {
+old_size = isalloc(ptr, false);
+old_rzsize = u2rz(old_size);
+}
if (config_prof && opt_prof) {
old_ctx = prof_ctx_get(ptr);
cnt = NULL;
}
-idalloc(ptr);
+iqalloc(ptr);
ret = NULL;
goto label_return;
} else
@@ -1010,8 +1063,18 @@ je_realloc(void *ptr, size_t size)
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
-if (config_prof || config_stats)
-old_size = isalloc(ptr);
+if (config_prof) {
+old_size = isalloc(ptr, true);
+if (config_valgrind && opt_valgrind)
+old_rzsize = p2rz(ptr);
+} else if (config_stats) {
+old_size = isalloc(ptr, false);
+if (config_valgrind && opt_valgrind)
+old_rzsize = u2rz(old_size);
+} else if (config_valgrind && opt_valgrind) {
+old_size = isalloc(ptr, false);
+old_rzsize = u2rz(old_size);
+}
if (config_prof && opt_prof) {
usize = s2u(size);
old_ctx = prof_ctx_get(ptr);
@@ -1035,7 +1098,7 @@ je_realloc(void *ptr, size_t size)
old_ctx = NULL;
}
} else {
-if (config_stats)
+if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
ret = iralloc(ptr, size, 0, 0, false, false);
}
@@ -1076,7 +1139,8 @@ label_oom:
ret = imalloc(size);
}
} else {
-if (config_stats)
+if (config_stats || (config_valgrind &&
+opt_valgrind))
usize = s2u(size);
ret = imalloc(size);
}
@@ -1097,12 +1161,13 @@ label_return:
prof_realloc(ret, usize, cnt, old_size, old_ctx);
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
-assert(usize == isalloc(ret));
+assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get();
ta->allocated += usize;
ta->deallocated += old_size;
}
UTRACE(ptr, size, ret);
+JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
return (ret);
}
@@ -1114,18 +1179,21 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (ptr != NULL) {
size_t usize;
+size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(malloc_initialized || IS_INITIALIZER);
if (config_prof && opt_prof) {
-usize = isalloc(ptr);
+usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
-} else if (config_stats) {
-usize = isalloc(ptr);
-}
+} else if (config_stats || config_valgrind)
+usize = isalloc(ptr, config_prof);
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
-idalloc(ptr);
+if (config_valgrind && opt_valgrind)
+rzsize = p2rz(ptr);
+iqalloc(ptr);
+JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
}
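
The rzsize bookkeeping above relies on two new helpers, u2rz() and p2rz(),
which map a usable size or a pointer to the redzone size that memcheck's
free-like-block request needs. Presumably they look roughly like this sketch,
based on the bin metadata added in this commit:

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{

	if (usize <= SMALL_MAXCLASS) {
		size_t binind = SMALL_SIZE2BIN(usize);
		return (arena_bin_info[binind].redzone_size);
	}
	/* Large/huge objects carry no redzones (see the commit message). */
	return (0);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{

	/* demote == false: size the redzone off the actual size class. */
	return (u2rz(isalloc(ptr, false)));
}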
@@ -1145,6 +1213,7 @@ je_memalign(size_t alignment, size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
imemalign(&ret, alignment, size, 1);
+JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -1157,6 +1226,7 @@ je_valloc(size_t size)
{
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
imemalign(&ret, PAGE, size, 1);
+JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -1209,9 +1279,9 @@ je_malloc_usable_size(const void *ptr)
assert(malloc_initialized || IS_INITIALIZER);
if (config_ivsalloc)
-ret = ivsalloc(ptr);
+ret = ivsalloc(ptr, config_prof);
else
-ret = (ptr != NULL) ? isalloc(ptr) : 0;
+ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
return (ret);
}
@@ -1336,10 +1406,11 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
*ptr = p;
if (config_stats) {
-assert(usize == isalloc(p));
+assert(usize == isalloc(p, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
UTRACE(0, size, p);
+JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
return (ALLOCM_SUCCESS);
label_oom:
if (config_xmalloc && opt_xmalloc) {
@@ -1360,6 +1431,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
void *p, *q;
size_t usize;
size_t old_size;
+size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
@@ -1384,7 +1456,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment, NULL);
prof_ctx_t *old_ctx = prof_ctx_get(p);
-old_size = isalloc(p);
+old_size = isalloc(p, true);
+if (config_valgrind && opt_valgrind)
+old_rzsize = p2rz(p);
PROF_ALLOC_PREP(1, max_usize, cnt);
if (cnt == NULL)
goto label_oom;
@@ -1403,27 +1477,33 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
usize = max_usize;
arena_prof_promoted(q, usize);
} else
-usize = isalloc(q);
+usize = isalloc(q, config_prof);
} else {
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto label_err;
-usize = isalloc(q);
+usize = isalloc(q, config_prof);
}
prof_realloc(q, usize, cnt, old_size, old_ctx);
if (rsize != NULL)
*rsize = usize;
} else {
-if (config_stats)
-old_size = isalloc(p);
+if (config_stats) {
+old_size = isalloc(p, false);
+if (config_valgrind && opt_valgrind)
+old_rzsize = u2rz(old_size);
+} else if (config_valgrind && opt_valgrind) {
+old_size = isalloc(p, false);
+old_rzsize = u2rz(old_size);
+}
q = iralloc(p, size, extra, alignment, zero, no_move);
if (q == NULL)
goto label_err;
if (config_stats)
-usize = isalloc(q);
+usize = isalloc(q, config_prof);
if (rsize != NULL) {
if (config_stats == false)
-usize = isalloc(q);
+usize = isalloc(q, config_prof);
*rsize = usize;
}
}
@@ -1436,6 +1516,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
ta->deallocated += old_size;
}
UTRACE(p, size, q);
+JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
return (ALLOCM_SUCCESS);
label_err:
if (no_move) {
@@ -1462,10 +1543,10 @@ je_sallocm(const void *ptr, size_t *rsize, int flags)
assert(malloc_initialized || IS_INITIALIZER);
if (config_ivsalloc)
-sz = ivsalloc(ptr);
+sz = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
-sz = isalloc(ptr);
+sz = isalloc(ptr, config_prof);
}
assert(rsize != NULL);
*rsize = sz;
@@ -1479,21 +1560,25 @@ int
je_dallocm(void *ptr, int flags)
{
size_t usize;
+size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
UTRACE(ptr, 0, 0);
-if (config_stats)
-usize = isalloc(ptr);
+if (config_stats || config_valgrind)
+usize = isalloc(ptr, config_prof);
if (config_prof && opt_prof) {
-if (config_stats == false)
-usize = isalloc(ptr);
+if (config_stats == false && config_valgrind == false)
+usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
}
if (config_stats)
thread_allocated_tsd_get()->deallocated += usize;
-idalloc(ptr);
+if (config_valgrind && opt_valgrind)
+rzsize = p2rz(ptr);
+iqalloc(ptr);
+JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS);
}

src/quarantine.c (new file, 163 lines)

@@ -0,0 +1,163 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */
typedef struct quarantine_s quarantine_t;
struct quarantine_s {
size_t curbytes;
size_t curobjs;
size_t first;
#define LG_MAXOBJS_INIT 10
size_t lg_maxobjs;
void *objs[1]; /* Dynamically sized ring buffer. */
};
static void quarantine_cleanup(void *arg);
malloc_tsd_data(static, quarantine, quarantine_t *, NULL)
malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL,
quarantine_cleanup)
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_init(size_t lg_maxobjs);
static quarantine_t *quarantine_grow(quarantine_t *quarantine);
static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
/******************************************************************************/
static quarantine_t *
quarantine_init(size_t lg_maxobjs)
{
quarantine_t *quarantine;
quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
((ZU(1) << lg_maxobjs) * sizeof(void *)));
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
quarantine->curobjs = 0;
quarantine->first = 0;
quarantine->lg_maxobjs = lg_maxobjs;
quarantine_tsd_set(&quarantine);
return (quarantine);
}
static quarantine_t *
quarantine_grow(quarantine_t *quarantine)
{
quarantine_t *ret;
ret = quarantine_init(quarantine->lg_maxobjs + 1);
if (ret == NULL)
return (quarantine);
ret->curbytes = quarantine->curbytes;
if (quarantine->first + quarantine->curobjs < (ZU(1) <<
quarantine->lg_maxobjs)) {
/* objs ring buffer data are contiguous. */
memcpy(ret->objs, &quarantine->objs[quarantine->first],
quarantine->curobjs * sizeof(void *));
ret->curobjs = quarantine->curobjs;
} else {
/* objs ring buffer data wrap around. */
size_t ncopy = (ZU(1) << quarantine->lg_maxobjs) -
quarantine->first;
memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy *
sizeof(void *));
ret->curobjs = ncopy;
if (quarantine->curobjs != ncopy) {
/* Copy the remainder in bytes and account for it in curobjs. */
memcpy(&ret->objs[ret->curobjs], quarantine->objs,
(quarantine->curobjs - ncopy) * sizeof(void *));
ret->curobjs = quarantine->curobjs;
}
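/*
 * Worked example of the wrap-around copy: with lg_maxobjs == 2 (capacity 4),
 * first == 3, and curobjs == 3, the live slots are objs[3], objs[0], objs[1].
 * ncopy == 4 - 3 == 1, so objs[3] lands in ret->objs[0], and the remaining
 * curobjs - ncopy == 2 objects are then copied from the start of the old ring.
 */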
}
return (ret);
}
static void
quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) {
void *ptr = quarantine->objs[quarantine->first];
size_t usize = isalloc(ptr, config_prof);
idalloc(ptr);
quarantine->curbytes -= usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
quarantine->lg_maxobjs) - 1);
}
}
void
quarantine(void *ptr)
{
quarantine_t *quarantine;
size_t usize = isalloc(ptr, config_prof);
assert(config_fill);
assert(opt_quarantine);
quarantine = *quarantine_tsd_get();
if (quarantine == NULL && (quarantine =
quarantine_init(LG_MAXOBJS_INIT)) == NULL) {
idalloc(ptr);
return;
}
/*
* Drain one or more objects if the quarantine size limit would be
* exceeded by appending ptr.
*/
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
quarantine_drain(quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
quarantine = quarantine_grow(quarantine);
/* quarantine_grow() must free a slot if it fails to grow. */
assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
/* Append ptr if its size doesn't exceed the quarantine size. */
if (quarantine->curbytes + usize <= opt_quarantine) {
size_t offset = (quarantine->first + quarantine->curobjs) &
((ZU(1) << quarantine->lg_maxobjs) - 1);
quarantine->objs[offset] = ptr;
quarantine->curbytes += usize;
quarantine->curobjs++;
if (opt_junk)
memset(ptr, 0x5a, usize);
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
}
}
static void
quarantine_cleanup(void *arg)
{
quarantine_t *quarantine = *(quarantine_t **)arg;
if (quarantine != NULL) {
quarantine_drain(quarantine, 0);
idalloc(quarantine);
}
}
bool
quarantine_boot(void)
{
assert(config_fill);
if (quarantine_tsd_boot())
return (true);
return (false);
}
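
End to end, the quarantine defers reuse of freed memory and, with junk fill,
keeps it poisoned with 0x5a, which is what lets Valgrind (or the pattern
itself) catch use-after-free. A hypothetical demonstration, assuming an
--enable-fill build and this era's MALLOC_CONF spelling (the read of freed
memory is deliberate and only for illustration):

/* MALLOC_CONF=junk:true,quarantine:4096 ./a.out */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	unsigned char *p = malloc(64);

	free(p);	/* Junk-filled with 0x5a and parked in quarantine. */
	/* The quarantine keeps the slot out of circulation, so the 0x5a
	 * pattern survives instead of being clobbered by a new allocation. */
	printf("p[0] after free: %#x (expect 0x5a)\n", p[0]);
	return (0);
}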

src/stats.c

@@ -382,8 +382,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
+OPT_WRITE_SIZE_T(quarantine)
+OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
OPT_WRITE_BOOL(utrace)
+OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
OPT_WRITE_SSIZE_T(lg_tcache_max)

src/tcache.c

@@ -75,6 +75,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
&chunk->map[pageind-map_bias];
+if (config_fill && opt_junk) {
+arena_alloc_junk_small(ptr,
+&arena_bin_info[binind], true);
+}
arena_dalloc_bin(arena, chunk, ptr, mapelm);
} else {
/*
@@ -298,7 +302,7 @@ tcache_destroy(tcache_t *tcache)
malloc_mutex_unlock(&tcache->arena->lock);
}
-tcache_size = arena_salloc(tcache);
+tcache_size = arena_salloc(tcache, false);
if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;

src/zone.c

@@ -56,7 +56,7 @@ zone_size(malloc_zone_t *zone, void *ptr)
* not work in practice, we must check all pointers to assure that they
* reside within a mapped chunk before determining size.
*/
-return (ivsalloc(ptr));
+return (ivsalloc(ptr, config_prof));
}
static void *
@@ -87,7 +87,7 @@ static void
zone_free(malloc_zone_t *zone, void *ptr)
{
-if (ivsalloc(ptr) != 0) {
+if (ivsalloc(ptr, config_prof) != 0) {
je_free(ptr);
return;
}
@@ -99,7 +99,7 @@ static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
-if (ivsalloc(ptr) != 0)
+if (ivsalloc(ptr, config_prof) != 0)
return (je_realloc(ptr, size));
return (realloc(ptr, size));
@@ -122,8 +122,8 @@ static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{
-if (ivsalloc(ptr) != 0) {
-assert(ivsalloc(ptr) == size);
+if (ivsalloc(ptr, config_prof) != 0) {
+assert(ivsalloc(ptr, config_prof) == size);
je_free(ptr);
return;
}