Make *allocx() size class overflow behavior defined.

Limit supported size and alignment to HUGE_MAXCLASS, which in turn is
now limited to be less than PTRDIFF_MAX.

This resolves #278 and #295.
Jason Evans
2016-02-25 15:29:49 -08:00
parent 767d85061a
commit 0c516a00c4
14 changed files with 247 additions and 89 deletions
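
What "defined" means in practice: any request whose usable size would exceed HUGE_MAXCLASS now fails cleanly instead of feeding overflowed values into size-class arithmetic. A minimal sketch (not part of the commit) of the resulting API behavior, assuming only the public mallocx() interface:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* A request this close to SIZE_MAX cannot map to a legal size class. */
	size_t huge_size = SIZE_MAX - 4096;

	/* mallocx() now fails with NULL rather than overflowing internally. */
	void *p = mallocx(huge_size, MALLOCX_ALIGN(4096));
	if (p == NULL)
		printf("oversized request rejected\n");
	return (0);
}

Built with cc demo.c -ljemalloc, the call fails deterministically in both debug and release builds.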

src/arena.c

@@ -2364,16 +2364,16 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
 }
 
 static void *
-arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
-    bool zero)
+arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
 {
 	void *ret;
 	arena_bin_t *bin;
+	size_t usize;
 	arena_run_t *run;
 
 	assert(binind < NBINS);
 	bin = &arena->bins[binind];
-	size = index2size(binind);
+	usize = index2size(binind);
 
 	malloc_mutex_lock(&bin->lock);
 	if ((run = bin->runcur) != NULL && run->nfree > 0)
@@ -2392,7 +2392,7 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
 		bin->stats.curregs++;
 	}
 	malloc_mutex_unlock(&bin->lock);
-	if (config_prof && !isthreaded && arena_prof_accum(arena, size))
+	if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
 		prof_idump();
 
 	if (!zero) {
@@ -2401,16 +2401,16 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
 			} else if (unlikely(opt_zero))
-				memset(ret, 0, size);
+				memset(ret, 0, usize);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
 	} else {
 		if (config_fill && unlikely(opt_junk_alloc)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
-		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-		memset(ret, 0, size);
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+		memset(ret, 0, usize);
 	}
 
 	arena_decay_tick(tsd, arena);
@@ -2418,8 +2418,7 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
 }
 
 void *
-arena_malloc_large(tsd_t *tsd, arena_t *arena, size_t size, szind_t binind,
-    bool zero)
+arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
 {
 	void *ret;
 	size_t usize;
@@ -2490,10 +2489,10 @@ arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
 		return (NULL);
 
 	if (likely(size <= SMALL_MAXCLASS))
-		return (arena_malloc_small(tsd, arena, size, ind, zero));
+		return (arena_malloc_small(tsd, arena, ind, zero));
 	if (likely(size <= large_maxclass))
-		return (arena_malloc_large(tsd, arena, size, ind, zero));
-	return (huge_malloc(tsd, arena, size, zero, tcache));
+		return (arena_malloc_large(tsd, arena, ind, zero));
+	return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
 }
 
 /* Only handles large allocations that require more than page alignment. */
@@ -3047,6 +3046,13 @@ arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
 {
 	size_t usize_min, usize_max;
 
+	/* Calls with non-zero extra had to clamp extra. */
+	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+
+	/* Prevent exceeding PTRDIFF_MAX. */
+	if (unlikely(size > HUGE_MAXCLASS))
+		return (true);
+
 	usize_min = s2u(size);
 	usize_max = s2u(size + extra);
 	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
@@ -3089,7 +3095,7 @@ arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
 		return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
 		    tcache, true));
 	usize = sa2u(usize, alignment);
-	if (usize == 0)
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return (NULL);
 	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 }
@@ -3102,7 +3108,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 	size_t usize;
 
 	usize = s2u(size);
-	if (usize == 0)
+	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
 		return (NULL);
 
 	if (likely(usize <= large_maxclass)) {
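
The recurring guard in these hunks, repeated in the files below, pairs the old usize == 0 test with an explicit usize > HUGE_MAXCLASS test. s2u()/sa2u() signal size_t wraparound by returning 0, but they can also produce a size class that is arithmetically fine yet larger than the allocator now supports, since HUGE_MAXCLASS is capped below PTRDIFF_MAX. A self-contained sketch of the pattern; s2u_sketch() and HUGE_MAXCLASS_SKETCH are hypothetical stand-ins for jemalloc's internals:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define HUGE_MAXCLASS_SKETCH ((size_t)PTRDIFF_MAX + 1 - 4096) /* illustrative cap */

static size_t
s2u_sketch(size_t size)
{
	/* Round up to a power of two; wraps to 0 on overflow, like s2u(). */
	size_t u = 1;

	while (u < size) {
		u <<= 1;
		if (u == 0)
			return (0);	/* size_t overflow. */
	}
	return (u);
}

static void *
alloc_checked(size_t size)
{
	size_t usize = s2u_sketch(size);

	/*
	 * Both failure modes matter: 0 means the rounding itself wrapped,
	 * while a value above the cap is representable but unsupported.
	 * Either way the failure is now defined.
	 */
	if (usize == 0 || usize > HUGE_MAXCLASS_SKETCH)
		return (NULL);
	return (malloc(usize));	/* stand-in for the real allocation path */
}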

src/ckh.c

@@ -266,7 +266,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
 		lg_curcells++;
 		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-		if (usize == 0) {
+		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 			ret = true;
 			goto label_return;
 		}
@@ -312,7 +312,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
 	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-	if (usize == 0)
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
 		return;
 	tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
 	    NULL);
@@ -387,7 +387,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 	ckh->keycomp = keycomp;
 
 	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-	if (usize == 0) {
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 		ret = true;
 		goto label_return;
 	}

src/huge.c

@@ -31,35 +31,30 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
 }
 
 void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
+huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
     tcache_t *tcache)
 {
-	size_t usize;
 
-	usize = s2u(size);
-	if (usize == 0) {
-		/* size_t overflow. */
-		return (NULL);
-	}
+	assert(usize == s2u(usize));
 
 	return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
 }
 
 void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache)
 {
 	void *ret;
-	size_t usize;
+	size_t ausize;
 	extent_node_t *node;
 	bool is_zeroed;
 
 	/* Allocate one or more contiguous chunks for this request. */
-	usize = sa2u(size, alignment);
-	if (unlikely(usize == 0))
+	ausize = sa2u(usize, alignment);
+	if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
 		return (NULL);
-	assert(usize >= chunksize);
+	assert(ausize >= chunksize);
 
 	/* Allocate an extent node with which to track the chunk. */
 	node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
@@ -74,15 +69,15 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
 	is_zeroed = zero;
 	arena = arena_choose(tsd, arena);
 	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
-	    size, alignment, &is_zeroed)) == NULL) {
+	    usize, alignment, &is_zeroed)) == NULL) {
 		idalloctm(tsd, node, tcache, true, true);
 		return (NULL);
 	}
 
-	extent_node_init(node, arena, ret, size, is_zeroed, true);
+	extent_node_init(node, arena, ret, usize, is_zeroed, true);
 
 	if (huge_node_set(ret, node)) {
-		arena_chunk_dalloc_huge(arena, ret, size);
+		arena_chunk_dalloc_huge(arena, ret, usize);
 		idalloctm(tsd, node, tcache, true, true);
 		return (NULL);
 	}
@@ -95,9 +90,9 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
 	if (zero || (config_fill && unlikely(opt_zero))) {
 		if (!is_zeroed)
-			memset(ret, 0, size);
+			memset(ret, 0, usize);
 	} else if (config_fill && unlikely(opt_junk_alloc))
-		memset(ret, 0xa5, size);
+		memset(ret, 0xa5, usize);
 
 	arena_decay_tick(tsd, arena);
 	return (ret);
@@ -286,6 +281,8 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
 {
 
 	assert(s2u(oldsize) == oldsize);
+	/* The following should have been caught by callers. */
+	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
 
 	/* Both allocations must be huge to avoid a move. */
 	if (oldsize < chunksize || usize_max < chunksize)
@@ -346,6 +343,9 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
 	void *ret;
 	size_t copysize;
 
+	/* The following should have been caught by callers. */
+	assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
 	/* Try to avoid moving the allocation. */
 	if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
 		return (ptr);

src/jemalloc.c

@@ -1449,18 +1449,17 @@ imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
 		return (NULL);
 	*tsd = tsd_fetch();
 	ind = size2index(size);
+	if (unlikely(ind >= NSIZES))
+		return (NULL);
 
-	if (config_stats ||
-	    (config_prof && opt_prof) ||
-	    (slow_path && config_valgrind && unlikely(in_valgrind))) {
+	if (config_stats || (config_prof && opt_prof) || (slow_path &&
+	    config_valgrind && unlikely(in_valgrind))) {
 		*usize = index2size(ind);
+		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
 	}
 
-	if (config_prof && opt_prof) {
-		if (unlikely(*usize == 0))
-			return (NULL);
+	if (config_prof && opt_prof)
 		return (imalloc_prof(*tsd, *usize, ind, slow_path));
-	}
 
 	return (imalloc(*tsd, size, ind, slow_path));
 }
@@ -1584,7 +1583,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 	}
 
 	usize = sa2u(size, alignment);
-	if (unlikely(usize == 0)) {
+	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
 		result = NULL;
 		goto label_oom;
 	}
@@ -1722,12 +1721,12 @@ je_calloc(size_t num, size_t size)
 	}
 
 	ind = size2index(num_size);
+	if (unlikely(ind >= NSIZES)) {
+		ret = NULL;
+		goto label_return;
+	}
 	if (config_prof && opt_prof) {
 		usize = index2size(ind);
-		if (unlikely(usize == 0)) {
-			ret = NULL;
-			goto label_return;
-		}
 		ret = icalloc_prof(tsd, usize, ind);
 	} else {
 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
@@ -1874,8 +1873,8 @@ je_realloc(void *ptr, size_t size)
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
-			ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
-			    ptr, old_usize, usize);
+			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
+			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
 		} else {
 			if (config_stats || (config_valgrind &&
 			    unlikely(in_valgrind)))
@@ -2006,7 +2005,8 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
 		*usize = sa2u(size, *alignment);
 	}
-	assert(*usize != 0);
+	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+		return (true);
 	*zero = MALLOCX_ZERO_GET(flags);
 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
@@ -2032,7 +2032,6 @@ imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
 	if (likely(flags == 0)) {
 		*usize = s2u(size);
-		assert(*usize != 0);
 		*alignment = 0;
 		*zero = false;
 		*tcache = tcache_get(tsd, true);
@@ -2051,6 +2050,8 @@ imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
 	szind_t ind;
 
 	ind = size2index(usize);
+	if (unlikely(ind >= NSIZES))
+		return (NULL);
 	if (unlikely(alignment != 0))
 		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 	if (unlikely(zero))
@@ -2120,8 +2121,13 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
 
 	if (likely(flags == 0)) {
 		szind_t ind = size2index(size);
-		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+		if (unlikely(ind >= NSIZES))
+			return (NULL);
+		if (config_stats || (config_valgrind &&
+		    unlikely(in_valgrind))) {
 			*usize = index2size(ind);
+			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+		}
 		return (imalloc(tsd, size, ind, true));
 	}
@@ -2278,7 +2284,8 @@ je_rallocx(void *ptr, size_t size, int flags)
 	if (config_prof && opt_prof) {
 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
-		assert(usize != 0);
+		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+			goto label_oom;
 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
 		    zero, tcache, arena);
 		if (unlikely(p == NULL))
@@ -2392,14 +2399,23 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	old_usize = isalloc(ptr, config_prof);
 
-	/* Clamp extra if necessary to avoid (size + extra) overflow. */
-	if (unlikely(size + extra > HUGE_MAXCLASS)) {
-		/* Check for size overflow. */
+	if (unlikely(extra > 0)) {
+		/*
+		 * The API explicitly absolves itself of protecting against
+		 * (size + extra) numerical overflow, but we may need to clamp
+		 * extra to avoid exceeding HUGE_MAXCLASS.
+		 *
+		 * Ordinarily, size limit checking is handled deeper down, but
+		 * here we have to check as part of (size + extra) clamping,
+		 * since we need the clamped value in the above helper
+		 * functions.
+		 */
 		if (unlikely(size > HUGE_MAXCLASS)) {
 			usize = old_usize;
 			goto label_not_resized;
 		}
-		extra = HUGE_MAXCLASS - size;
+		if (unlikely(HUGE_MAXCLASS - size < extra))
+			extra = HUGE_MAXCLASS - size;
 	}
 
 	if (config_valgrind && unlikely(in_valgrind))
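
The rewritten clamp deserves a note: the old test size + extra > HUGE_MAXCLASS computes size + extra, which can wrap around and make an enormous request look small, whereas HUGE_MAXCLASS - size < extra compares against the remaining headroom and cannot overflow once the preceding check has rejected size > HUGE_MAXCLASS. A compilable sketch of just that logic (HUGE_MAXCLASS_SKETCH is a stand-in, not jemalloc's constant):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define HUGE_MAXCLASS_SKETCH ((size_t)PTRDIFF_MAX + 1 - 4096)

/* Clamp extra so that size + extra neither wraps nor exceeds the limit. */
static size_t
clamp_extra(size_t size, size_t extra)
{
	assert(size <= HUGE_MAXCLASS_SKETCH);	/* caller already rejected larger */

	/*
	 * The headroom left below the cap is HUGE_MAXCLASS_SKETCH - size;
	 * comparing it to extra avoids ever computing size + extra.
	 */
	if (HUGE_MAXCLASS_SKETCH - size < extra)
		extra = HUGE_MAXCLASS_SKETCH - size;
	return (extra);
}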
@@ -2474,7 +2490,6 @@ inallocx(size_t size, int flags)
 		usize = s2u(size);
 	else
 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
-	assert(usize != 0);
 	return (usize);
 }
@@ -2507,13 +2522,18 @@ JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
 JEMALLOC_ATTR(pure)
 je_nallocx(size_t size, int flags)
 {
+	size_t usize;
 
 	assert(size != 0);
 
 	if (unlikely(malloc_init()))
 		return (0);
 
-	return (inallocx(size, flags));
+	usize = inallocx(size, flags);
+	if (unlikely(usize > HUGE_MAXCLASS))
+		return (0);
+	return (usize);
 }
 
 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
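
One consequence visible at the API surface: nallocx() now returns 0 for any request whose size class would exceed HUGE_MAXCLASS, so callers can probe a size before committing to an allocation. A usage sketch (editor's illustration against the public API, not part of the commit):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* In-range requests report the rounded-up usable size. */
	size_t usable = nallocx(4097, 0);

	if (usable != 0)
		printf("4097-byte request rounds to %zu usable bytes\n", usable);

	/* Anything above HUGE_MAXCLASS now yields 0 instead of a bogus size. */
	if (nallocx(SIZE_MAX / 2 + 1, 0) == 0)
		printf("oversized request reported as unsupported\n");
	return (0);
}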