Fix size class overflow bugs.

Avoid calling s2u() on raw extent sizes in extent_recycle().

Clamp psz2ind() (implemented as psz2ind_clamp()) when inserting/removing
into/from size-segregated extent heaps.
This commit is contained in:
Jason Evans 2016-10-03 14:18:55 -07:00
parent d51139c33c
commit 871a9498e1
4 changed files with 30 additions and 8 deletions

View File

@ -516,7 +516,9 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/large.h"
#ifndef JEMALLOC_ENABLE_INLINE
pszind_t psz2ind_impl(size_t psz, bool clamp);
pszind_t psz2ind(size_t psz);
pszind_t psz2ind_clamp(size_t psz);
size_t pind2sz_compute(pszind_t pind);
size_t pind2sz_lookup(pszind_t pind);
size_t pind2sz(pszind_t pind);
@ -541,12 +543,12 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE pszind_t
psz2ind(size_t psz)
JEMALLOC_ALWAYS_INLINE pszind_t
psz2ind_impl(size_t psz, bool clamp)
{
if (unlikely(psz > LARGE_MAXCLASS))
return (NPSIZES);
return (clamp ? NPSIZES-1 : NPSIZES);
{
pszind_t x = lg_floor((psz<<1)-1);
pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
@ -565,6 +567,20 @@ psz2ind(size_t psz)
}
}
/*
 * Compute the page size class index for psz, without clamping: a psz
 * larger than LARGE_MAXCLASS maps to NPSIZES (one past the last valid
 * index), so callers must check for overflow themselves.
 */
JEMALLOC_INLINE pszind_t
psz2ind(size_t psz)
{
return (psz2ind_impl(psz, false));
}
/*
 * Compute the page size class index for psz, clamping: a psz larger
 * than LARGE_MAXCLASS maps to NPSIZES-1 (the largest valid index).
 * Used when indexing into NPSIZES-element heaps, where an unclamped
 * index would overflow the array.
 */
JEMALLOC_INLINE pszind_t
psz2ind_clamp(size_t psz)
{
return (psz2ind_impl(psz, true));
}
JEMALLOC_INLINE size_t
pind2sz_compute(pszind_t pind)
{

View File

@ -395,6 +395,8 @@ prof_thread_active_set
prof_thread_name_get
prof_thread_name_set
psz2ind
psz2ind_clamp
psz2ind_impl
psz2u
purge_mode_names
register_zone

View File

@ -769,8 +769,8 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
/* Allocate. */
zero = false;
textent = extent_alloc_cache_locked(tsdn, arena, r_extent_hooks,
extent_base_get(extent), extent_size_get(extent), 0,
CACHELINE, &zero, false);
extent_base_get(extent), extent_size_get(extent), 0, PAGE,
&zero, false);
assert(textent == extent);
assert(zero == extent_zeroed_get(extent));
extent_ring_remove(extent);

View File

@ -195,7 +195,7 @@ extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
pszind_t pind = psz2ind_clamp(psz);
malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
@ -207,7 +207,7 @@ extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
extent_t *extent)
{
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
pszind_t pind = psz2ind(psz);
pszind_t pind = psz2ind_clamp(psz);
malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
@ -364,6 +364,7 @@ extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
pind = psz2ind(extent_size_quantize_ceil(size));
assert(pind < NPSIZES);
for (i = pind; i < NPSIZES; i++) {
extent_t *extent = extent_heap_first(&extent_heaps[i]);
if (extent != NULL)
@ -419,13 +420,16 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* course cannot be recycled).
*/
assert(PAGE_ADDR2BASE(new_addr) == new_addr);
assert(pad == 0);
assert(alignment <= PAGE);
prev = extent_lookup(tsdn, (void *)((uintptr_t)new_addr - PAGE),
false);
assert(prev == NULL || extent_past_get(prev) == new_addr);
}
size = usize + pad;
alloc_size = s2u(size + PAGE_CEILING(alignment) - PAGE);
alloc_size = (new_addr != NULL) ? size : s2u(size +
PAGE_CEILING(alignment) - PAGE);
/* Beware size_t wrap-around. */
if (alloc_size < usize)
return (NULL);