SC: Make some key size classes static.
The largest small class, smallest large class, and largest large class may all be needed down fast paths; to avoid the risk of touching another cache line, we can make them available as constants.
commit 55e5cc1341
parent 5112d9e5fd
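
To make the intent concrete, here is a minimal sketch of what the change buys on a fast path; this is illustrative code, not code from this commit, and the helper names and the example constant value are hypothetical. The size-class bound turns from a runtime load out of a global struct into a compile-time constant that the compiler can fold into the comparison.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins for the real jemalloc definitions. */
typedef struct {
	size_t small_maxclass;
	/* ... many other boot-time-computed fields ... */
} sc_data_t;
static sc_data_t sc_data_global = {14336};
#define SC_SMALL_MAXCLASS ((size_t)14336) /* example value only */

/* Before: reads sc_data_global, risking a miss on a cache line the fast
 * path otherwise never touches. */
static bool
small_fast_path_before(size_t size) {
	return size <= sc_data_global.small_maxclass;
}

/* After: the bound is an integer constant expression, so it can be emitted
 * as an immediate operand; no extra memory is touched. */
static bool
small_fast_path_after(size_t size) {
	return size <= SC_SMALL_MAXCLASS;
}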
@@ -111,7 +111,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 	assert(size != 0);
 
 	if (likely(tcache != NULL)) {
-		if (likely(size <= sc_data_global.small_maxclass)) {
+		if (likely(size <= SC_SMALL_MAXCLASS)) {
 			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
 			    tcache, size, ind, zero, slow_path);
 		}
@@ -263,7 +263,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 static inline void
 arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	assert(ptr != NULL);
-	assert(size <= sc_data_global.large_maxclass);
+	assert(size <= SC_LARGE_MAXCLASS);
 
 	szind_t szind;
 	bool slab;
@@ -309,7 +309,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
     alloc_ctx_t *alloc_ctx, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
-	assert(size <= sc_data_global.large_maxclass);
+	assert(size <= SC_LARGE_MAXCLASS);
 
 	if (unlikely(tcache == NULL)) {
 		arena_sdalloc_no_tcache(tsdn, ptr, size);
@@ -142,7 +142,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 	size_t usize, copysize;
 
 	usize = sz_sa2u(size, alignment);
-	if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
+	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
 		return NULL;
 	}
 	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
@@ -57,15 +57,15 @@ prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
 #ifdef JEMALLOC_ATOMIC_U64
 	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
 	do {
-		a1 = (a0 >= sc_data_global.large_minclass - usize)
-		    ? a0 - (sc_data_global.large_minclass - usize) : 0;
+		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
 	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
 	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
 #else
 	malloc_mutex_lock(tsdn, &prof_accum->mtx);
 	a0 = prof_accum->accumbytes;
-	a1 = (a0 >= sc_data_global.large_minclass - usize)
-	    ? a0 - (sc_data_global.large_minclass - usize) : 0;
+	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
 	prof_accum->accumbytes = a1;
 	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
 #endif
@@ -238,6 +238,25 @@
 /* The largest size class in the lookup table. */
 #define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
 
+/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
+#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
+#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
+
+/* The largest size class allocated out of a slab. */
+#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
+
+/* The smallest size class not allocated out of a slab. */
+#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
+#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
+
+/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
+#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
+#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
+
+/* The largest size class supported. */
+#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
+
 typedef struct sc_s sc_t;
 struct sc_s {
 	/* Size class index, or -1 if not a valid size class. */
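For a concrete reading of the new sc.h macros: assuming LG_PAGE = 12 (4 KiB pages), SC_LG_NGROUP = 2 (so SC_NGROUP = 4), and SC_PTR_BITS = 64 (typical values for a 64-bit build, assumed here rather than fixed by this diff), the constants evaluate as follows.

/* Worked example under the assumptions above:
 *
 * SC_SMALL_MAX_BASE    = 1 << (12 + 2 - 1)     = 8192
 * SC_SMALL_MAX_DELTA   = 1 << (12 - 1)         = 2048
 * SC_SMALL_MAXCLASS    = 8192 + (4 - 1) * 2048 = 14336 (14 KiB)
 *
 * SC_LARGE_MINCLASS    = 1 << (12 + 2)         = 16384 (16 KiB)
 * SC_LG_LARGE_MINCLASS = 14
 *
 * SC_MAX_BASE          = 1 << 62
 * SC_MAX_DELTA         = 1 << 60
 * SC_LARGE_MAXCLASS    = (1 << 62) + 3 * (1 << 60) = 7 << 60
 */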
@@ -51,7 +51,7 @@ extern void sz_boot(const sc_data_t *sc_data);
 
 JEMALLOC_ALWAYS_INLINE pszind_t
 sz_psz2ind(size_t psz) {
-	if (unlikely(psz > sc_data_global.large_maxclass)) {
+	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
 		return sc_data_global.npsizes;
 	}
 	pszind_t x = lg_floor((psz<<1)-1);
@@ -73,7 +73,7 @@ sz_psz2ind(size_t psz) {
 static inline size_t
 sz_pind2sz_compute(pszind_t pind) {
 	if (unlikely(pind == sc_data_global.npsizes)) {
-		return sc_data_global.large_maxclass + PAGE;
+		return SC_LARGE_MAXCLASS + PAGE;
 	}
 	size_t grp = pind >> SC_LG_NGROUP;
 	size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
@@ -105,8 +105,8 @@ sz_pind2sz(pszind_t pind) {
 
 static inline size_t
 sz_psz2u(size_t psz) {
-	if (unlikely(psz > sc_data_global.large_maxclass)) {
-		return sc_data_global.large_maxclass + PAGE;
+	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
+		return SC_LARGE_MAXCLASS + PAGE;
 	}
 	size_t x = lg_floor((psz<<1)-1);
 	size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
@@ -119,7 +119,7 @@ sz_psz2u(size_t psz) {
 
 static inline szind_t
 sz_size2index_compute(size_t size) {
-	if (unlikely(size > sc_data_global.large_maxclass)) {
+	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		return SC_NSIZES;
 	}
 #if (SC_NTINY != 0)
@@ -207,7 +207,7 @@ sz_index2size(szind_t index) {
 
 JEMALLOC_ALWAYS_INLINE size_t
 sz_s2u_compute(size_t size) {
-	if (unlikely(size > sc_data_global.large_maxclass)) {
+	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		return 0;
 	}
 #if (SC_NTINY > 0)
@@ -262,7 +262,7 @@ sz_sa2u(size_t size, size_t alignment) {
 	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
 
 	/* Try for a small size class. */
-	if (size <= sc_data_global.small_maxclass && alignment < PAGE) {
+	if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
 		/*
 		 * Round size up to the nearest multiple of alignment.
 		 *
@@ -278,20 +278,20 @@ sz_sa2u(size_t size, size_t alignment) {
 		 *   192 | 11000000 |           64
 		 */
 		usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
-		if (usize < sc_data_global.large_minclass) {
+		if (usize < SC_LARGE_MINCLASS) {
 			return usize;
 		}
 	}
 
 	/* Large size class. Beware of overflow. */
 
-	if (unlikely(alignment > sc_data_global.large_maxclass)) {
+	if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
 		return 0;
 	}
 
 	/* Make sure result is a large size class. */
-	if (size <= sc_data_global.large_minclass) {
-		usize = sc_data_global.large_minclass;
+	if (size <= SC_LARGE_MINCLASS) {
+		usize = SC_LARGE_MINCLASS;
 	} else {
 		usize = sz_s2u(size);
 		if (usize < size) {
@@ -167,7 +167,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	cache_bin_info_t *bin_info;
 
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
-	    <= sc_data_global.small_maxclass);
+	    <= SC_SMALL_MAXCLASS);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free)) {
 		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
@@ -193,7 +193,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	cache_bin_info_t *bin_info;
 
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
-	    > sc_data_global.small_maxclass);
+	    > SC_SMALL_MAXCLASS);
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free)) {
src/arena.c (50 lines changed)
@@ -296,8 +296,8 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 
 	cassert(config_stats);
 
-	if (usize < sc_data_global.large_minclass) {
-		usize = sc_data_global.large_minclass;
+	if (usize < SC_LARGE_MINCLASS) {
+		usize = SC_LARGE_MINCLASS;
 	}
 	index = sz_size2index(usize);
 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
@@ -312,8 +312,8 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
 
 	cassert(config_stats);
 
-	if (usize < sc_data_global.large_minclass) {
-		usize = sc_data_global.large_minclass;
+	if (usize < SC_LARGE_MINCLASS) {
+		usize = SC_LARGE_MINCLASS;
 	}
 	index = sz_size2index(usize);
 	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
@@ -1389,7 +1389,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
 		return NULL;
 	}
 
-	if (likely(size <= sc_data_global.small_maxclass)) {
+	if (likely(size <= SC_SMALL_MAXCLASS)) {
 		return arena_malloc_small(tsdn, arena, ind, zero);
 	}
 	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
@@ -1400,7 +1400,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     bool zero, tcache_t *tcache) {
 	void *ret;
 
-	if (usize <= sc_data_global.small_maxclass
+	if (usize <= SC_SMALL_MAXCLASS
 	    && (alignment < PAGE
 	    || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
 		/* Small; alignment doesn't require special slab placement. */
@@ -1420,8 +1420,8 @@ void
 arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
 	cassert(config_prof);
 	assert(ptr != NULL);
-	assert(isalloc(tsdn, ptr) == sc_data_global.large_minclass);
-	assert(usize <= sc_data_global.small_maxclass);
+	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
+	assert(usize <= SC_SMALL_MAXCLASS);
 
 	rtree_ctx_t rtree_ctx_fallback;
 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@@ -1451,9 +1451,9 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
 	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
 	    SC_NBINS, false);
 
-	assert(isalloc(tsdn, ptr) == sc_data_global.large_minclass);
+	assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
 
-	return sc_data_global.large_minclass;
+	return SC_LARGE_MINCLASS;
 }
 
 void
@@ -1594,25 +1594,25 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero, size_t *newsize) {
 	bool ret;
 	/* Calls with non-zero extra had to clamp extra. */
-	assert(extra == 0 || size + extra <= sc_data_global.large_maxclass);
+	assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
 
 	extent_t *extent = iealloc(tsdn, ptr);
-	if (unlikely(size > sc_data_global.large_maxclass)) {
+	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		ret = true;
 		goto done;
 	}
 
 	size_t usize_min = sz_s2u(size);
 	size_t usize_max = sz_s2u(size + extra);
-	if (likely(oldsize <= sc_data_global.small_maxclass && usize_min
-	    <= sc_data_global.small_maxclass)) {
+	if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min
+	    <= SC_SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
 		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
 		    oldsize);
-		if ((usize_max > sc_data_global.small_maxclass
+		if ((usize_max > SC_SMALL_MAXCLASS
 		    || sz_size2index(usize_max) != sz_size2index(oldsize))
 		    && (size > oldsize || usize_max < oldsize)) {
 			ret = true;
@@ -1621,8 +1621,8 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 
 		arena_decay_tick(tsdn, extent_arena_get(extent));
 		ret = false;
-	} else if (oldsize >= sc_data_global.large_minclass
-	    && usize_max >= sc_data_global.large_minclass) {
+	} else if (oldsize >= SC_LARGE_MINCLASS
+	    && usize_max >= SC_LARGE_MINCLASS) {
 		ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
 		    zero);
 	} else {
@@ -1643,7 +1643,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
 		    zero, tcache, true);
 	}
 	usize = sz_sa2u(usize, alignment);
-	if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
+	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
 		return NULL;
 	}
 	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
@@ -1654,11 +1654,11 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
     size_t size, size_t alignment, bool zero, tcache_t *tcache,
     hook_ralloc_args_t *hook_args) {
 	size_t usize = sz_s2u(size);
-	if (unlikely(usize == 0 || size > sc_data_global.large_maxclass)) {
+	if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
 		return NULL;
 	}
 
-	if (likely(usize <= sc_data_global.small_maxclass)) {
+	if (likely(usize <= SC_SMALL_MAXCLASS)) {
 		/* Try to avoid moving the allocation. */
 		UNUSED size_t newsize;
 		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
@@ -1671,8 +1671,8 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
 		}
 	}
 
-	if (oldsize >= sc_data_global.large_minclass
-	    && usize >= sc_data_global.large_minclass) {
+	if (oldsize >= SC_LARGE_MINCLASS
+	    && usize >= SC_LARGE_MINCLASS) {
 		return large_ralloc(tsdn, arena, ptr, usize,
 		    alignment, zero, tcache, hook_args);
 	}
@@ -1985,10 +1985,10 @@ arena_init_huge(void) {
 	bool huge_enabled;
 
 	/* The threshold should be large size class. */
-	if (opt_huge_threshold > sc_data_global.large_maxclass ||
-	    opt_huge_threshold < sc_data_global.large_minclass) {
+	if (opt_huge_threshold > SC_LARGE_MAXCLASS ||
+	    opt_huge_threshold < SC_LARGE_MINCLASS) {
 		opt_huge_threshold = 0;
-		huge_threshold = sc_data_global.large_maxclass + PAGE;
+		huge_threshold = SC_LARGE_MAXCLASS + PAGE;
 		huge_enabled = false;
 	} else {
 		/* Reserve the index for the huge arena. */
@@ -276,7 +276,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
 		lg_curcells++;
 		usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
 		if (unlikely(usize == 0
-		    || usize > sc_data_global.large_maxclass)) {
+		    || usize > SC_LARGE_MAXCLASS)) {
 			ret = true;
 			goto label_return;
 		}
@@ -321,7 +321,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
 	usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
-	if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
+	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
 		return;
 	}
 	tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
@@ -397,7 +397,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
 	ckh->keycomp = keycomp;
 
 	usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
-	if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
+	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
 		ret = true;
 		goto label_return;
 	}
@@ -259,7 +259,7 @@ extent_size_quantize_ceil(size_t size) {
 	size_t ret;
 
 	assert(size > 0);
-	assert(size - sz_large_pad <= sc_data_global.large_maxclass);
+	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	ret = extent_size_quantize_floor(size);
@@ -1625,7 +1625,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 	if (!extents->delay_coalesce) {
 		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
 		    rtree_ctx, extents, extent, NULL, growing_retained);
-	} else if (extent_size_get(extent) >= sc_data_global.large_minclass) {
+	} else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
 		/* Always coalesce large extents eagerly. */
 		bool coalesced;
 		size_t prev_size;
@@ -1637,7 +1637,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 			    &coalesced, growing_retained);
 		} while (coalesced &&
 		    extent_size_get(extent)
-		    >= prev_size + sc_data_global.large_minclass);
+		    >= prev_size + SC_LARGE_MINCLASS);
 	}
 	extent_deactivate_locked(tsdn, arena, extents, extent);
 
@@ -1201,8 +1201,8 @@ malloc_conf_init(void) {
 			/* Experimental feature. Will be documented later.*/
 			CONF_HANDLE_SIZE_T(opt_huge_threshold,
 			    "experimental_huge_threshold",
-			    sc_data_global.large_minclass,
-			    sc_data_global.large_maxclass, yes, yes, false)
+			    SC_LARGE_MINCLASS,
+			    SC_LARGE_MAXCLASS, yes, yes, false)
 			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
 			    "lg_extent_max_active_fit", 0,
 			    (sizeof(size_t) << 3), yes, yes, false)
@@ -1827,13 +1827,13 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
 	szind_t ind_large;
 	size_t bumped_usize = usize;
 
-	if (usize <= sc_data_global.small_maxclass) {
+	if (usize <= SC_SMALL_MAXCLASS) {
 		assert(((dopts->alignment == 0) ?
-		    sz_s2u(sc_data_global.large_minclass) :
-		    sz_sa2u(sc_data_global.large_minclass, dopts->alignment))
-		    == sc_data_global.large_minclass);
-		ind_large = sz_size2index(sc_data_global.large_minclass);
-		bumped_usize = sz_s2u(sc_data_global.large_minclass);
+		    sz_s2u(SC_LARGE_MINCLASS) :
+		    sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
+		    == SC_LARGE_MINCLASS);
+		ind_large = sz_size2index(SC_LARGE_MINCLASS);
+		bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
 		    bumped_usize, ind_large);
 		if (unlikely(ret == NULL)) {
@@ -1942,12 +1942,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 		if (config_stats || (config_prof && opt_prof)) {
 			usize = sz_index2size(ind);
 			assert(usize > 0 && usize
-			    <= sc_data_global.large_maxclass);
+			    <= SC_LARGE_MAXCLASS);
 		}
 	} else {
 		usize = sz_sa2u(size, dopts->alignment);
 		if (unlikely(usize == 0
-		    || usize > sc_data_global.large_maxclass)) {
+		    || usize > SC_LARGE_MAXCLASS)) {
 			goto label_oom;
 		}
 	}
@@ -1984,7 +1984,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 		alloc_ctx_t alloc_ctx;
 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
 			alloc_ctx.slab = (usize
-			    <= sc_data_global.small_maxclass);
+			    <= SC_SMALL_MAXCLASS);
 			allocation = imalloc_no_sample(
 			    sopts, dopts, tsd, usize, usize, ind);
 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
@@ -2282,9 +2282,9 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
 	if (tctx == NULL) {
 		return NULL;
 	}
-	if (usize <= sc_data_global.small_maxclass) {
+	if (usize <= SC_SMALL_MAXCLASS) {
 		p = iralloc(tsd, old_ptr, old_usize,
-		    sc_data_global.large_minclass, 0, false, hook_args);
+		    SC_LARGE_MINCLASS, 0, false, hook_args);
 		if (p == NULL) {
 			return NULL;
 		}
@@ -2474,7 +2474,7 @@ je_realloc(void *ptr, size_t arg_size) {
 	if (config_prof && opt_prof) {
 		usize = sz_s2u(size);
 		if (unlikely(usize == 0
-		    || usize > sc_data_global.large_maxclass)) {
+		    || usize > SC_LARGE_MAXCLASS)) {
 			ret = NULL;
 		} else {
 			ret = irealloc_prof(tsd, ptr, old_usize, usize,
@@ -2787,9 +2787,9 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
 	if (tctx == NULL) {
 		return NULL;
 	}
-	if (usize <= sc_data_global.small_maxclass) {
+	if (usize <= SC_SMALL_MAXCLASS) {
 		p = iralloct(tsdn, old_ptr, old_usize,
-		    sc_data_global.large_minclass, alignment, zero, tcache,
+		    SC_LARGE_MINCLASS, alignment, zero, tcache,
 		    arena, hook_args);
 		if (p == NULL) {
 			return NULL;
@@ -2900,7 +2900,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
 		usize = (alignment == 0) ?
 		    sz_s2u(size) : sz_sa2u(size, alignment);
 		if (unlikely(usize == 0
-		    || usize > sc_data_global.large_maxclass)) {
+		    || usize > SC_LARGE_MAXCLASS)) {
 			goto label_oom;
 		}
 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
@@ -2986,18 +2986,18 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
 	if (alignment == 0) {
 		usize_max = sz_s2u(size+extra);
 		assert(usize_max > 0
-		    && usize_max <= sc_data_global.large_maxclass);
+		    && usize_max <= SC_LARGE_MAXCLASS);
 	} else {
 		usize_max = sz_sa2u(size+extra, alignment);
 		if (unlikely(usize_max == 0
-		    || usize_max > sc_data_global.large_maxclass)) {
+		    || usize_max > SC_LARGE_MAXCLASS)) {
 			/*
 			 * usize_max is out of range, and chances are that
 			 * allocation will fail, but use the maximum possible
 			 * value and carry on with prof_alloc_prep(), just in
 			 * case allocation succeeds.
 			 */
-			usize_max = sc_data_global.large_maxclass;
+			usize_max = SC_LARGE_MAXCLASS;
 		}
 	}
 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
@@ -3046,18 +3046,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
 	/*
 	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
-	 * exceeding sc_data_global.large_maxclass.
+	 * exceeding SC_LARGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
-	if (unlikely(size > sc_data_global.large_maxclass)) {
+	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		usize = old_usize;
 		goto label_not_resized;
 	}
-	if (unlikely(sc_data_global.large_maxclass - size < extra)) {
-		extra = sc_data_global.large_maxclass - size;
+	if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
+		extra = SC_LARGE_MAXCLASS - size;
 	}
 
 	if (config_prof && opt_prof) {
@@ -3244,7 +3244,7 @@ je_nallocx(size_t size, int flags) {
 	check_entry_exit_locking(tsdn);
 
 	usize = inallocx(tsdn, size, flags);
-	if (unlikely(usize > sc_data_global.large_maxclass)) {
+	if (unlikely(usize > SC_LARGE_MAXCLASS)) {
 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
 		return 0;
 	}
src/large.c (14 lines changed)
@@ -28,7 +28,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
 	assert(!tsdn_null(tsdn) || arena != NULL);
 
 	ausize = sz_sa2u(usize, alignment);
-	if (unlikely(ausize == 0 || ausize > sc_data_global.large_maxclass)) {
+	if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
 		return NULL;
 	}
 
@@ -221,10 +221,10 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
 	size_t oldusize = extent_usize_get(extent);
 
 	/* The following should have been caught by callers. */
-	assert(usize_min > 0 && usize_max <= sc_data_global.large_maxclass);
+	assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
 	/* Both allocation sizes must be large to avoid a move. */
-	assert(oldusize >= sc_data_global.large_minclass
-	    && usize_max >= sc_data_global.large_minclass);
+	assert(oldusize >= SC_LARGE_MINCLASS
+	    && usize_max >= SC_LARGE_MINCLASS);
 
 	if (usize_max > oldusize) {
 		/* Attempt to expand the allocation in-place. */
@@ -278,10 +278,10 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
 
 	size_t oldusize = extent_usize_get(extent);
 	/* The following should have been caught by callers. */
-	assert(usize > 0 && usize <= sc_data_global.large_maxclass);
+	assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
 	/* Both allocation sizes must be large to avoid a move. */
-	assert(oldusize >= sc_data_global.large_minclass
-	    && usize >= sc_data_global.large_minclass);
+	assert(oldusize >= SC_LARGE_MINCLASS
+	    && usize >= SC_LARGE_MINCLASS);
 
 	/* Try to avoid moving the allocation. */
 	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
src/sc.c (14 lines changed)
@@ -228,6 +228,20 @@ size_classes(
 	sc_data->lg_large_minclass = lg_large_minclass;
 	sc_data->large_minclass = (ZU(1) << lg_large_minclass);
 	sc_data->large_maxclass = large_maxclass;
+
+	/*
+	 * We compute these values in two ways:
+	 * - Incrementally, as above.
+	 * - In macros, in sc.h.
+	 * The computation is easier when done incrementally, but putting it in
+	 * a constant makes it available to the fast paths without having to
+	 * touch the extra global cacheline. We assert, however, that the two
+	 * computations are equivalent.
+	 */
+	assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS);
+	assert(sc_data->large_minclass == SC_LARGE_MINCLASS);
+	assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
+	assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
 }
 
 void
@@ -658,8 +658,8 @@ bool
 tcache_boot(tsdn_t *tsdn) {
 	/* If necessary, clamp opt_lg_tcache_max. */
 	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
-	    sc_data_global.small_maxclass) {
-		tcache_maxclass = sc_data_global.small_maxclass;
+	    SC_SMALL_MAXCLASS) {
+		tcache_maxclass = SC_SMALL_MAXCLASS;
 	} else {
 		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
 	}
@@ -123,13 +123,13 @@ test_junk(size_t sz_min, size_t sz_max) {
 
 TEST_BEGIN(test_junk_small) {
 	test_skip_if(!config_fill);
-	test_junk(1, sc_data_global.small_maxclass - 1);
+	test_junk(1, SC_SMALL_MAXCLASS - 1);
 }
 TEST_END
 
 TEST_BEGIN(test_junk_large) {
 	test_skip_if(!config_fill);
-	test_junk(sc_data_global.small_maxclass + 1,
+	test_junk(SC_SMALL_MAXCLASS + 1,
 	    (1U << (sc_data_global.lg_large_minclass + 1)));
 }
 TEST_END
@@ -721,7 +721,7 @@ TEST_BEGIN(test_arenas_lextent_constants) {
 	} while (0)
 
 	TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
-	    sc_data_global.large_minclass);
+	    SC_LARGE_MINCLASS);
 
 #undef TEST_ARENAS_LEXTENT_CONSTANT
 }
@@ -85,8 +85,8 @@ TEST_END
 
 TEST_BEGIN(test_rtree_extrema) {
 	extent_t extent_a, extent_b;
-	extent_init(&extent_a, NULL, NULL, sc_data_global.large_minclass, false,
-	    sz_size2index(sc_data_global.large_minclass), 0,
+	extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false,
+	    sz_size2index(SC_LARGE_MINCLASS), 0,
 	    extent_state_active, false, false, true);
 	extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
 	    extent_state_active, false, false, true);
@@ -33,7 +33,7 @@ TEST_BEGIN(test_stats_large) {
 	size_t sz;
 	int expected = config_stats ? 0 : ENOENT;
 
-	p = mallocx(sc_data_global.small_maxclass + 1, MALLOCX_ARENA(0));
+	p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0));
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
 	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
@@ -74,7 +74,7 @@ TEST_BEGIN(test_stats_arenas_summary) {
 	uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
 	uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
 
-	little = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0));
+	little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
 	assert_ptr_not_null(little, "Unexpected mallocx() failure");
 	large = mallocx((1U << sc_data_global.lg_large_minclass),
 	    MALLOCX_ARENA(0));
@@ -149,7 +149,7 @@ TEST_BEGIN(test_stats_arenas_small) {
 
 	no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
 
-	p = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0));
+	p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0));
 	assert_ptr_not_null(p, "Unexpected mallocx() failure");
 
 	assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
@@ -41,13 +41,13 @@ test_zero(size_t sz_min, size_t sz_max) {
 
 TEST_BEGIN(test_zero_small) {
 	test_skip_if(!config_fill);
-	test_zero(1, sc_data_global.small_maxclass - 1);
+	test_zero(1, SC_SMALL_MAXCLASS - 1);
 }
 TEST_END
 
 TEST_BEGIN(test_zero_large) {
 	test_skip_if(!config_fill);
-	test_zero(sc_data_global.small_maxclass + 1,
+	test_zero(SC_SMALL_MAXCLASS + 1,
 	    1U << (sc_data_global.lg_large_minclass + 1));
 }
 TEST_END
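A side note on a further effect, sketched under the same assumptions as above (hypothetical example, not from this commit): because the new names are integer constant expressions rather than loads from a global struct, they can now appear where C requires compile-time constants.

/* A constant expression can size a static buffer or feed a static
 * assertion; the old sc_data_global.small_maxclass, being a runtime
 * value, could not. */
static unsigned char one_small_object[SC_SMALL_MAXCLASS];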