Fix psz/pind edge cases.
Add an "over-size" extent heap in which to store extents which exceed the maximum size class (plus cache-oblivious padding, if enabled). Remove psz2ind_clamp() and use psz2ind() instead so that trying to allocate the maximum size class can in principle succeed. In practice, this allows assertions to hold so that OOM errors can be successfully generated.
parent 8dd5ea87ca
commit ea9961acdb
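For orientation before the diff: psz2ind() now maps any size above LARGE_MAXCLASS to the extra index NPSIZES, so arrays sized NPSIZES+1 gain a dedicated slot for over-size extents. Below is a minimal standalone sketch of that indexing, not jemalloc code: PAGE, NPSIZES, and LARGE_MAXCLASS are placeholder values, the toy_* helpers are hypothetical stand-ins for the real inlines, and the "heap" is reduced to a counter.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Placeholder values; jemalloc derives the real ones from its size classes. */
#define PAGE            ((size_t)4096)
#define NPSIZES         199
#define LARGE_MAXCLASS  (((size_t)1) << 30)

typedef struct { size_t nextents; } toy_heap_t;

/* Toy analogue of the new psz2ind(): over-size requests map to NPSIZES. */
static size_t
toy_psz2ind(size_t psz)
{
        if (psz > LARGE_MAXCLASS)
                return (NPSIZES);
        return (0); /* The real function computes a proper size-class index. */
}

int
main(void)
{
        /* One extra slot, mirroring extents_cached[NPSIZES+1] in the diff. */
        toy_heap_t extents_cached[NPSIZES + 1] = {{0}};

        /* An extent of LARGE_MAXCLASS plus padding lands in the extra heap. */
        extents_cached[toy_psz2ind(LARGE_MAXCLASS + PAGE)].nextents++;
        assert(extents_cached[NPSIZES].nextents == 1);

        /* Ordinary sizes keep using the first NPSIZES slots. */
        extents_cached[toy_psz2ind(PAGE)].nextents++;
        assert(extents_cached[0].nextents == 1);

        printf("over-size extents are tracked in slot %d of %d+1\n",
            NPSIZES, NPSIZES);
        return (0);
}

Compiled on its own, the asserts pass; that is all the sketch is meant to show.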
@@ -212,8 +212,8 @@ struct arena_s {
          * Heaps of extents that were previously allocated. These are used when
          * allocating extents, in an attempt to re-use address space.
          */
-        extent_heap_t extents_cached[NPSIZES];
-        extent_heap_t extents_retained[NPSIZES];
+        extent_heap_t extents_cached[NPSIZES+1];
+        extent_heap_t extents_retained[NPSIZES+1];
         /*
          * Ring sentinel used to track unused dirty memory. Dirty memory is
          * managed as an LRU of cached extents.

@@ -434,7 +434,7 @@ extern arena_t **arenas;
  * pind2sz_tab encodes the same information as could be computed by
  * pind2sz_compute().
  */
-extern size_t const pind2sz_tab[NPSIZES];
+extern size_t const pind2sz_tab[NPSIZES+1];
 /*
  * index2size_tab encodes the same information as could be computed (at
  * unacceptable cost in some code paths) by index2size_compute().
@@ -516,9 +516,7 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/large.h"
 
 #ifndef JEMALLOC_ENABLE_INLINE
-pszind_t psz2ind_impl(size_t psz, bool clamp);
 pszind_t psz2ind(size_t psz);
-pszind_t psz2ind_clamp(size_t psz);
 size_t pind2sz_compute(pszind_t pind);
 size_t pind2sz_lookup(pszind_t pind);
 size_t pind2sz(pszind_t pind);

@@ -544,11 +542,11 @@ ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
 JEMALLOC_ALWAYS_INLINE pszind_t
-psz2ind_impl(size_t psz, bool clamp)
+psz2ind(size_t psz)
 {
 
         if (unlikely(psz > LARGE_MAXCLASS))
-                return (clamp ? NPSIZES-1 : NPSIZES);
+                return (NPSIZES);
         {
                 pszind_t x = lg_floor((psz<<1)-1);
                 pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -

@@ -567,24 +565,12 @@ psz2ind_impl(size_t psz, bool clamp)
         }
 }
 
-JEMALLOC_INLINE pszind_t
-psz2ind(size_t psz)
-{
-
-        return (psz2ind_impl(psz, false));
-}
-
-JEMALLOC_INLINE pszind_t
-psz2ind_clamp(size_t psz)
-{
-
-        return (psz2ind_impl(psz, true));
-}
-
 JEMALLOC_INLINE size_t
 pind2sz_compute(pszind_t pind)
 {
 
+        if (unlikely(pind == NPSIZES))
+                return (LARGE_MAXCLASS + PAGE);
         {
                 size_t grp = pind >> LG_SIZE_CLASS_GROUP;
                 size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
@@ -614,7 +600,7 @@ JEMALLOC_INLINE size_t
 pind2sz(pszind_t pind)
 {
 
-        assert(pind < NPSIZES);
+        assert(pind < NPSIZES+1);
         return (pind2sz_lookup(pind));
 }
 

@@ -623,7 +609,7 @@ psz2u(size_t psz)
 {
 
         if (unlikely(psz > LARGE_MAXCLASS))
-                return (0);
+                return (LARGE_MAXCLASS + PAGE);
         {
                 size_t x = lg_floor((psz<<1)-1);
                 size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
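Taken together, the inline changes above keep the conversions consistent at the boundary: psz2ind() returns NPSIZES for anything above LARGE_MAXCLASS, pind2sz() maps NPSIZES back to LARGE_MAXCLASS + PAGE, and psz2u() now rounds over-size requests to that same value instead of returning 0. A compilable restatement of that contract, again with placeholder constants and hypothetical toy_* stand-ins rather than the real inlines (which also handle every in-range class):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Placeholder values standing in for jemalloc's generated constants. */
#define PAGE            ((size_t)4096)
#define NPSIZES         199
#define LARGE_MAXCLASS  (((size_t)1) << 30)

/* Boundary behavior only; in-range size classes are elided. */
static size_t
toy_psz2ind(size_t psz)
{
        return (psz > LARGE_MAXCLASS ? (size_t)NPSIZES : 0);
}

static size_t
toy_pind2sz(size_t pind)
{
        return (pind == NPSIZES ? LARGE_MAXCLASS + PAGE : PAGE);
}

static size_t
toy_psz2u(size_t psz)
{
        return (psz > LARGE_MAXCLASS ? LARGE_MAXCLASS + PAGE : psz);
}

int
main(void)
{
        size_t max_psz = LARGE_MAXCLASS + PAGE;

        /* The over-size index and the over-size "class" round-trip. */
        assert(toy_psz2ind(LARGE_MAXCLASS + 1) == NPSIZES);
        assert(toy_pind2sz(NPSIZES) == max_psz);

        /* psz2u() now reports a usable size instead of 0 on overflow. */
        assert(toy_psz2u(LARGE_MAXCLASS + 1) == max_psz);
        assert(toy_psz2u(SIZE_MAX) == max_psz);
        return (0);
}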
@@ -380,8 +380,6 @@ prof_thread_active_set
 prof_thread_name_get
 prof_thread_name_set
 psz2ind
-psz2ind_clamp
-psz2ind_impl
 psz2u
 rtree_child_read
 rtree_child_read_hard

@@ -1696,7 +1696,7 @@ arena_new(tsdn_t *tsdn, unsigned ind)
             WITNESS_RANK_ARENA_LARGE))
                 return (NULL);
 
-        for (i = 0; i < NPSIZES; i++) {
+        for (i = 0; i < NPSIZES+1; i++) {
                 extent_heap_new(&arena->extents_cached[i]);
                 extent_heap_new(&arena->extents_retained[i]);
         }
src/extent.c
@@ -45,7 +45,7 @@ static size_t highpages;
  */
 
 static void extent_record(tsdn_t *tsdn, arena_t *arena,
-    extent_hooks_t **r_extent_hooks, extent_heap_t extent_heaps[NPSIZES],
+    extent_hooks_t **r_extent_hooks, extent_heap_t extent_heaps[NPSIZES+1],
     bool cache, extent_t *extent);
 
 /******************************************************************************/

@@ -190,11 +190,11 @@ extent_ad_comp(const extent_t *a, const extent_t *b)
 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_ad_comp)
 
 static void
-extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
+extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
     extent_t *extent)
 {
         size_t psz = extent_size_quantize_floor(extent_size_get(extent));
-        pszind_t pind = psz2ind_clamp(psz);
+        pszind_t pind = psz2ind(psz);
 
         malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
 

@@ -202,11 +202,11 @@ extent_heaps_insert(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
 }
 
 static void
-extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES],
+extent_heaps_remove(tsdn_t *tsdn, extent_heap_t extent_heaps[NPSIZES+1],
     extent_t *extent)
 {
         size_t psz = extent_size_quantize_floor(extent_size_get(extent));
-        pszind_t pind = psz2ind_clamp(psz);
+        pszind_t pind = psz2ind(psz);
 
         malloc_mutex_assert_owner(tsdn, &extent_arena_get(extent)->extents_mtx);
 

@@ -358,15 +358,14 @@ extent_deregister(tsdn_t *tsdn, extent_t *extent)
  */
 static extent_t *
 extent_first_best_fit(tsdn_t *tsdn, arena_t *arena,
-    extent_heap_t extent_heaps[NPSIZES], size_t size)
+    extent_heap_t extent_heaps[NPSIZES+1], size_t size)
 {
         pszind_t pind, i;
 
         malloc_mutex_assert_owner(tsdn, &arena->extents_mtx);
 
         pind = psz2ind(extent_size_quantize_ceil(size));
-        assert(pind < NPSIZES);
-        for (i = pind; i < NPSIZES; i++) {
+        for (i = pind; i < NPSIZES+1; i++) {
                 extent_t *extent = extent_heap_first(&extent_heaps[i]);
                 if (extent != NULL)
                         return (extent);
@@ -393,7 +392,7 @@ extent_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 
 static extent_t *
 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extent_heap_t extent_heaps[NPSIZES], bool locked, bool cache,
+    extent_heap_t extent_heaps[NPSIZES+1], bool locked, bool cache,
     void *new_addr, size_t usize, size_t pad, size_t alignment, bool *zero,
     bool *commit, bool slab)
 {

@@ -758,7 +757,7 @@ extent_can_coalesce(const extent_t *a, const extent_t *b)
 static void
 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
-    extent_heap_t extent_heaps[NPSIZES], bool cache)
+    extent_heap_t extent_heaps[NPSIZES+1], bool cache)
 {
 
         if (!extent_can_coalesce(a, b))

@@ -786,7 +785,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
 
 static void
 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
-    extent_heap_t extent_heaps[NPSIZES], bool cache, extent_t *extent)
+    extent_heap_t extent_heaps[NPSIZES+1], bool cache, extent_t *extent)
 {
         extent_t *prev, *next;
         rtree_ctx_t rtree_ctx_fallback;
@@ -83,7 +83,7 @@ enum {
 static uint8_t malloc_slow_flags;
 
 JEMALLOC_ALIGNED(CACHELINE)
-const size_t pind2sz_tab[NPSIZES] = {
+const size_t pind2sz_tab[NPSIZES+1] = {
 #define PSZ_yes(lg_grp, ndelta, lg_delta) \
         (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
 #define PSZ_no(lg_grp, ndelta, lg_delta)

@@ -93,6 +93,7 @@ const size_t pind2sz_tab[NPSIZES] = {
 #undef PSZ_yes
 #undef PSZ_no
 #undef SC
+        (LARGE_MAXCLASS + PAGE)
 };
 
 JEMALLOC_ALIGNED(CACHELINE)
@@ -82,19 +82,18 @@ TEST_END
 
 TEST_BEGIN(test_psize_classes)
 {
-        size_t size_class, max_size_class;
+        size_t size_class, max_psz;
         pszind_t pind, max_pind;
 
-        max_size_class = get_max_size_class();
-        max_pind = psz2ind(max_size_class);
+        max_psz = get_max_size_class() + PAGE;
+        max_pind = psz2ind(max_psz);
 
-        for (pind = 0, size_class = pind2sz(pind); pind < max_pind ||
-            size_class < max_size_class; pind++, size_class =
-            pind2sz(pind)) {
+        for (pind = 0, size_class = pind2sz(pind); pind < max_pind || size_class
+            < max_psz; pind++, size_class = pind2sz(pind)) {
                 assert_true(pind < max_pind,
                     "Loop conditionals should be equivalent; pind=%u, "
                     "size_class=%zu (%#zx)", pind, size_class, size_class);
-                assert_true(size_class < max_size_class,
+                assert_true(size_class < max_psz,
                     "Loop conditionals should be equivalent; pind=%u, "
                     "size_class=%zu (%#zx)", pind, size_class, size_class);
 

@@ -125,7 +124,7 @@ TEST_BEGIN(test_psize_classes)
 
         assert_u_eq(pind, psz2ind(pind2sz(pind)),
             "psz2ind() does not reverse pind2sz()");
-        assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)),
+        assert_zu_eq(max_psz, pind2sz(psz2ind(max_psz)),
             "pind2sz() does not reverse psz2ind()");
 
         assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1),

@@ -139,9 +138,10 @@ TEST_END
 
 TEST_BEGIN(test_overflow)
 {
-        size_t max_size_class;
+        size_t max_size_class, max_psz;
 
         max_size_class = get_max_size_class();
+        max_psz = max_size_class + PAGE;
 
         assert_u_eq(size2index(max_size_class+1), NSIZES,
             "size2index() should return NSIZES on overflow");
@@ -164,12 +164,14 @@ TEST_BEGIN(test_overflow)
         assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES,
             "psz2ind() should return NPSIZES on overflow");
 
-        assert_zu_eq(psz2u(max_size_class+1), 0,
-            "psz2u() should return 0 for unsupported size");
-        assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0,
-            "psz2u() should return 0 for unsupported size");
-        assert_zu_eq(psz2u(SIZE_T_MAX), 0,
-            "psz2u() should return 0 on overflow");
+        assert_zu_eq(psz2u(max_size_class+1), max_psz,
+            "psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
+            " size");
+        assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), max_psz,
+            "psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported "
+            "size");
+        assert_zu_eq(psz2u(SIZE_T_MAX), max_psz,
+            "psz2u() should return (LARGE_MAXCLASS + PAGE) on overflow");
 }
 TEST_END
 