From 55e5cc1341de87ad06254d719946a5ecd05f06ab Mon Sep 17 00:00:00 2001 From: David Goldblatt Date: Wed, 11 Jul 2018 16:05:58 -0700 Subject: [PATCH] SC: Make some key size classes static. The largest small class, smallest large class, and largest large class may all be needed down fast paths; to avoid the risk of touching another cache line, we can make them available as constants. --- include/jemalloc/internal/arena_inlines_b.h | 6 +-- .../internal/jemalloc_internal_inlines_c.h | 2 +- include/jemalloc/internal/prof_inlines_a.h | 8 +-- include/jemalloc/internal/sc.h | 19 +++++++ include/jemalloc/internal/sz.h | 22 ++++---- include/jemalloc/internal/tcache_inlines.h | 4 +- src/arena.c | 50 +++++++++---------- src/ckh.c | 6 +-- src/extent.c | 6 +-- src/jemalloc.c | 50 +++++++++---------- src/large.c | 14 +++--- src/sc.c | 14 ++++++ src/tcache.c | 4 +- test/unit/junk.c | 4 +- test/unit/mallctl.c | 2 +- test/unit/rtree.c | 4 +- test/unit/stats.c | 6 +-- test/unit/zero.c | 4 +- 18 files changed, 129 insertions(+), 96 deletions(-) diff --git a/include/jemalloc/internal/arena_inlines_b.h b/include/jemalloc/internal/arena_inlines_b.h index 89603966..2b3915ae 100644 --- a/include/jemalloc/internal/arena_inlines_b.h +++ b/include/jemalloc/internal/arena_inlines_b.h @@ -111,7 +111,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, assert(size != 0); if (likely(tcache != NULL)) { - if (likely(size <= sc_data_global.small_maxclass)) { + if (likely(size <= SC_SMALL_MAXCLASS)) { return tcache_alloc_small(tsdn_tsd(tsdn), arena, tcache, size, ind, zero, slow_path); } @@ -263,7 +263,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, static inline void arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { assert(ptr != NULL); - assert(size <= sc_data_global.large_maxclass); + assert(size <= SC_LARGE_MAXCLASS); szind_t szind; bool slab; @@ -309,7 +309,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, alloc_ctx_t *alloc_ctx, bool slow_path) { assert(!tsdn_null(tsdn) || tcache == NULL); assert(ptr != NULL); - assert(size <= sc_data_global.large_maxclass); + assert(size <= SC_LARGE_MAXCLASS); if (unlikely(tcache == NULL)) { arena_sdalloc_no_tcache(tsdn, ptr, size); diff --git a/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/include/jemalloc/internal/jemalloc_internal_inlines_c.h index 83ad10ff..9c5fec62 100644 --- a/include/jemalloc/internal/jemalloc_internal_inlines_c.h +++ b/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -142,7 +142,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t usize, copysize; usize = sz_sa2u(size, alignment); - if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) { + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return NULL; } p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); diff --git a/include/jemalloc/internal/prof_inlines_a.h b/include/jemalloc/internal/prof_inlines_a.h index 07bfd9f3..471d9853 100644 --- a/include/jemalloc/internal/prof_inlines_a.h +++ b/include/jemalloc/internal/prof_inlines_a.h @@ -57,15 +57,15 @@ prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, #ifdef JEMALLOC_ATOMIC_U64 a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); do { - a1 = (a0 >= sc_data_global.large_minclass - usize) - ? a0 - (sc_data_global.large_minclass - usize) : 0; + a1 = (a0 >= SC_LARGE_MINCLASS - usize) + ? 
a0 - (SC_LARGE_MINCLASS - usize) : 0; } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); #else malloc_mutex_lock(tsdn, &prof_accum->mtx); a0 = prof_accum->accumbytes; - a1 = (a0 >= sc_data_global.large_minclass - usize) - ? a0 - (sc_data_global.large_minclass - usize) : 0; + a1 = (a0 >= SC_LARGE_MINCLASS - usize) + ? a0 - (SC_LARGE_MINCLASS - usize) : 0; prof_accum->accumbytes = a1; malloc_mutex_unlock(tsdn, &prof_accum->mtx); #endif diff --git a/include/jemalloc/internal/sc.h b/include/jemalloc/internal/sc.h index 592115a7..5c94378c 100644 --- a/include/jemalloc/internal/sc.h +++ b/include/jemalloc/internal/sc.h @@ -238,6 +238,25 @@ /* The largest size class in the lookup table. */ #define SC_LOOKUP_MAXCLASS ((size_t)1 << 12) +/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */ +#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1)) +#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1)) + +/* The largest size class allocated out of a slab. */ +#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \ + + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA) + +/* The smallest size class not allocated out of a slab. */ +#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP)) +#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP) + +/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */ +#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2)) +#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP)) + +/* The largest size class supported. */ +#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA) + typedef struct sc_s sc_t; struct sc_s { /* Size class index, or -1 if not a valid size class. */ diff --git a/include/jemalloc/internal/sz.h b/include/jemalloc/internal/sz.h index b37e7969..e743d878 100644 --- a/include/jemalloc/internal/sz.h +++ b/include/jemalloc/internal/sz.h @@ -51,7 +51,7 @@ extern void sz_boot(const sc_data_t *sc_data); JEMALLOC_ALWAYS_INLINE pszind_t sz_psz2ind(size_t psz) { - if (unlikely(psz > sc_data_global.large_maxclass)) { + if (unlikely(psz > SC_LARGE_MAXCLASS)) { return sc_data_global.npsizes; } pszind_t x = lg_floor((psz<<1)-1); @@ -73,7 +73,7 @@ sz_psz2ind(size_t psz) { static inline size_t sz_pind2sz_compute(pszind_t pind) { if (unlikely(pind == sc_data_global.npsizes)) { - return sc_data_global.large_maxclass + PAGE; + return SC_LARGE_MAXCLASS + PAGE; } size_t grp = pind >> SC_LG_NGROUP; size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1); @@ -105,8 +105,8 @@ sz_pind2sz(pszind_t pind) { static inline size_t sz_psz2u(size_t psz) { - if (unlikely(psz > sc_data_global.large_maxclass)) { - return sc_data_global.large_maxclass + PAGE; + if (unlikely(psz > SC_LARGE_MAXCLASS)) { + return SC_LARGE_MAXCLASS + PAGE; } size_t x = lg_floor((psz<<1)-1); size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ? @@ -119,7 +119,7 @@ sz_psz2u(size_t psz) { static inline szind_t sz_size2index_compute(size_t size) { - if (unlikely(size > sc_data_global.large_maxclass)) { + if (unlikely(size > SC_LARGE_MAXCLASS)) { return SC_NSIZES; } #if (SC_NTINY != 0) @@ -207,7 +207,7 @@ sz_index2size(szind_t index) { JEMALLOC_ALWAYS_INLINE size_t sz_s2u_compute(size_t size) { - if (unlikely(size > sc_data_global.large_maxclass)) { + if (unlikely(size > SC_LARGE_MAXCLASS)) { return 0; } #if (SC_NTINY > 0) @@ -262,7 +262,7 @@ sz_sa2u(size_t size, size_t alignment) { assert(alignment != 0 && ((alignment - 1) & alignment) == 0); /* Try for a small size class. 
*/ - if (size <= sc_data_global.small_maxclass && alignment < PAGE) { + if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) { /* * Round size up to the nearest multiple of alignment. * @@ -278,20 +278,20 @@ sz_sa2u(size_t size, size_t alignment) { * 192 | 11000000 | 64 */ usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); - if (usize < sc_data_global.large_minclass) { + if (usize < SC_LARGE_MINCLASS) { return usize; } } /* Large size class. Beware of overflow. */ - if (unlikely(alignment > sc_data_global.large_maxclass)) { + if (unlikely(alignment > SC_LARGE_MAXCLASS)) { return 0; } /* Make sure result is a large size class. */ - if (size <= sc_data_global.large_minclass) { - usize = sc_data_global.large_minclass; + if (size <= SC_LARGE_MINCLASS) { + usize = SC_LARGE_MINCLASS; } else { usize = sz_s2u(size); if (usize < size) { diff --git a/include/jemalloc/internal/tcache_inlines.h b/include/jemalloc/internal/tcache_inlines.h index b060043b..7c956468 100644 --- a/include/jemalloc/internal/tcache_inlines.h +++ b/include/jemalloc/internal/tcache_inlines.h @@ -167,7 +167,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) - <= sc_data_global.small_maxclass); + <= SC_SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, &bin_infos[binind]); @@ -193,7 +193,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, cache_bin_info_t *bin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) - > sc_data_global.small_maxclass); + > SC_SMALL_MAXCLASS); assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); if (slow_path && config_fill && unlikely(opt_junk_free)) { diff --git a/src/arena.c b/src/arena.c index 07d91039..91043cff 100644 --- a/src/arena.c +++ b/src/arena.c @@ -296,8 +296,8 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { cassert(config_stats); - if (usize < sc_data_global.large_minclass) { - usize = sc_data_global.large_minclass; + if (usize < SC_LARGE_MINCLASS) { + usize = SC_LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; @@ -312,8 +312,8 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { cassert(config_stats); - if (usize < sc_data_global.large_minclass) { - usize = sc_data_global.large_minclass; + if (usize < SC_LARGE_MINCLASS) { + usize = SC_LARGE_MINCLASS; } index = sz_size2index(usize); hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0; @@ -1389,7 +1389,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, return NULL; } - if (likely(size <= sc_data_global.small_maxclass)) { + if (likely(size <= SC_SMALL_MAXCLASS)) { return arena_malloc_small(tsdn, arena, ind, zero); } return large_malloc(tsdn, arena, sz_index2size(ind), zero); @@ -1400,7 +1400,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, bool zero, tcache_t *tcache) { void *ret; - if (usize <= sc_data_global.small_maxclass + if (usize <= SC_SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { /* Small; alignment doesn't require special slab placement. 
*/ @@ -1420,8 +1420,8 @@ void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { cassert(config_prof); assert(ptr != NULL); - assert(isalloc(tsdn, ptr) == sc_data_global.large_minclass); - assert(usize <= sc_data_global.small_maxclass); + assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); + assert(usize <= SC_SMALL_MAXCLASS); rtree_ctx_t rtree_ctx_fallback; rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); @@ -1451,9 +1451,9 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, SC_NBINS, false); - assert(isalloc(tsdn, ptr) == sc_data_global.large_minclass); + assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); - return sc_data_global.large_minclass; + return SC_LARGE_MINCLASS; } void @@ -1594,25 +1594,25 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, bool zero, size_t *newsize) { bool ret; /* Calls with non-zero extra had to clamp extra. */ - assert(extra == 0 || size + extra <= sc_data_global.large_maxclass); + assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); extent_t *extent = iealloc(tsdn, ptr); - if (unlikely(size > sc_data_global.large_maxclass)) { + if (unlikely(size > SC_LARGE_MAXCLASS)) { ret = true; goto done; } size_t usize_min = sz_s2u(size); size_t usize_max = sz_s2u(size + extra); - if (likely(oldsize <= sc_data_global.small_maxclass && usize_min - <= sc_data_global.small_maxclass)) { + if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min + <= SC_SMALL_MAXCLASS)) { /* * Avoid moving the allocation if the size class can be left the * same. */ assert(bin_infos[sz_size2index(oldsize)].reg_size == oldsize); - if ((usize_max > sc_data_global.small_maxclass + if ((usize_max > SC_SMALL_MAXCLASS || sz_size2index(usize_max) != sz_size2index(oldsize)) && (size > oldsize || usize_max < oldsize)) { ret = true; @@ -1621,8 +1621,8 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, arena_decay_tick(tsdn, extent_arena_get(extent)); ret = false; - } else if (oldsize >= sc_data_global.large_minclass - && usize_max >= sc_data_global.large_minclass) { + } else if (oldsize >= SC_LARGE_MINCLASS + && usize_max >= SC_LARGE_MINCLASS) { ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max, zero); } else { @@ -1643,7 +1643,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, zero, tcache, true); } usize = sz_sa2u(usize, alignment); - if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) { + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return NULL; } return ipalloct(tsdn, usize, alignment, zero, tcache, arena); @@ -1654,11 +1654,11 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache, hook_ralloc_args_t *hook_args) { size_t usize = sz_s2u(size); - if (unlikely(usize == 0 || size > sc_data_global.large_maxclass)) { + if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { return NULL; } - if (likely(usize <= sc_data_global.small_maxclass)) { + if (likely(usize <= SC_SMALL_MAXCLASS)) { /* Try to avoid moving the allocation. 
*/ UNUSED size_t newsize; if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero, @@ -1671,8 +1671,8 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, } } - if (oldsize >= sc_data_global.large_minclass - && usize >= sc_data_global.large_minclass) { + if (oldsize >= SC_LARGE_MINCLASS + && usize >= SC_LARGE_MINCLASS) { return large_ralloc(tsdn, arena, ptr, usize, alignment, zero, tcache, hook_args); } @@ -1985,10 +1985,10 @@ arena_init_huge(void) { bool huge_enabled; /* The threshold should be large size class. */ - if (opt_huge_threshold > sc_data_global.large_maxclass || - opt_huge_threshold < sc_data_global.large_minclass) { + if (opt_huge_threshold > SC_LARGE_MAXCLASS || + opt_huge_threshold < SC_LARGE_MINCLASS) { opt_huge_threshold = 0; - huge_threshold = sc_data_global.large_maxclass + PAGE; + huge_threshold = SC_LARGE_MAXCLASS + PAGE; huge_enabled = false; } else { /* Reserve the index for the huge arena. */ diff --git a/src/ckh.c b/src/ckh.c index 94c4fe69..1bf6df5a 100644 --- a/src/ckh.c +++ b/src/ckh.c @@ -276,7 +276,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) { lg_curcells++; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); if (unlikely(usize == 0 - || usize > sc_data_global.large_maxclass)) { + || usize > SC_LARGE_MAXCLASS)) { ret = true; goto label_return; } @@ -321,7 +321,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) { lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) { + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { return; } tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, @@ -397,7 +397,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh->keycomp = keycomp; usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) { + if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { ret = true; goto label_return; } diff --git a/src/extent.c b/src/extent.c index 0953940b..74076b66 100644 --- a/src/extent.c +++ b/src/extent.c @@ -259,7 +259,7 @@ extent_size_quantize_ceil(size_t size) { size_t ret; assert(size > 0); - assert(size - sz_large_pad <= sc_data_global.large_maxclass); + assert(size - sz_large_pad <= SC_LARGE_MAXCLASS); assert((size & PAGE_MASK) == 0); ret = extent_size_quantize_floor(size); @@ -1625,7 +1625,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, if (!extents->delay_coalesce) { extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, extents, extent, NULL, growing_retained); - } else if (extent_size_get(extent) >= sc_data_global.large_minclass) { + } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) { /* Always coalesce large extents eagerly. */ bool coalesced; size_t prev_size; @@ -1637,7 +1637,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, &coalesced, growing_retained); } while (coalesced && extent_size_get(extent) - >= prev_size + sc_data_global.large_minclass); + >= prev_size + SC_LARGE_MINCLASS); } extent_deactivate_locked(tsdn, arena, extents, extent); diff --git a/src/jemalloc.c b/src/jemalloc.c index 4ffe5aaa..e66735c8 100644 --- a/src/jemalloc.c +++ b/src/jemalloc.c @@ -1201,8 +1201,8 @@ malloc_conf_init(void) { /* Experimental feature. 
Will be documented later.*/ CONF_HANDLE_SIZE_T(opt_huge_threshold, "experimental_huge_threshold", - sc_data_global.large_minclass, - sc_data_global.large_maxclass, yes, yes, false) + SC_LARGE_MINCLASS, + SC_LARGE_MAXCLASS, yes, yes, false) CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, "lg_extent_max_active_fit", 0, (sizeof(size_t) << 3), yes, yes, false) @@ -1827,13 +1827,13 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, szind_t ind_large; size_t bumped_usize = usize; - if (usize <= sc_data_global.small_maxclass) { + if (usize <= SC_SMALL_MAXCLASS) { assert(((dopts->alignment == 0) ? - sz_s2u(sc_data_global.large_minclass) : - sz_sa2u(sc_data_global.large_minclass, dopts->alignment)) - == sc_data_global.large_minclass); - ind_large = sz_size2index(sc_data_global.large_minclass); - bumped_usize = sz_s2u(sc_data_global.large_minclass); + sz_s2u(SC_LARGE_MINCLASS) : + sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment)) + == SC_LARGE_MINCLASS); + ind_large = sz_size2index(SC_LARGE_MINCLASS); + bumped_usize = sz_s2u(SC_LARGE_MINCLASS); ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, bumped_usize, ind_large); if (unlikely(ret == NULL)) { @@ -1942,12 +1942,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { if (config_stats || (config_prof && opt_prof)) { usize = sz_index2size(ind); assert(usize > 0 && usize - <= sc_data_global.large_maxclass); + <= SC_LARGE_MAXCLASS); } } else { usize = sz_sa2u(size, dopts->alignment); if (unlikely(usize == 0 - || usize > sc_data_global.large_maxclass)) { + || usize > SC_LARGE_MAXCLASS)) { goto label_oom; } } @@ -1984,7 +1984,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { alloc_ctx_t alloc_ctx; if (likely((uintptr_t)tctx == (uintptr_t)1U)) { alloc_ctx.slab = (usize - <= sc_data_global.small_maxclass); + <= SC_SMALL_MAXCLASS); allocation = imalloc_no_sample( sopts, dopts, tsd, usize, usize, ind); } else if ((uintptr_t)tctx > (uintptr_t)1U) { @@ -2282,9 +2282,9 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, if (tctx == NULL) { return NULL; } - if (usize <= sc_data_global.small_maxclass) { + if (usize <= SC_SMALL_MAXCLASS) { p = iralloc(tsd, old_ptr, old_usize, - sc_data_global.large_minclass, 0, false, hook_args); + SC_LARGE_MINCLASS, 0, false, hook_args); if (p == NULL) { return NULL; } @@ -2474,7 +2474,7 @@ je_realloc(void *ptr, size_t arg_size) { if (config_prof && opt_prof) { usize = sz_s2u(size); if (unlikely(usize == 0 - || usize > sc_data_global.large_maxclass)) { + || usize > SC_LARGE_MAXCLASS)) { ret = NULL; } else { ret = irealloc_prof(tsd, ptr, old_usize, usize, @@ -2787,9 +2787,9 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, if (tctx == NULL) { return NULL; } - if (usize <= sc_data_global.small_maxclass) { + if (usize <= SC_SMALL_MAXCLASS) { p = iralloct(tsdn, old_ptr, old_usize, - sc_data_global.large_minclass, alignment, zero, tcache, + SC_LARGE_MINCLASS, alignment, zero, tcache, arena, hook_args); if (p == NULL) { return NULL; @@ -2900,7 +2900,7 @@ je_rallocx(void *ptr, size_t size, int flags) { usize = (alignment == 0) ? 
sz_s2u(size) : sz_sa2u(size, alignment); if (unlikely(usize == 0 - || usize > sc_data_global.large_maxclass)) { + || usize > SC_LARGE_MAXCLASS)) { goto label_oom; } p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, @@ -2986,18 +2986,18 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, if (alignment == 0) { usize_max = sz_s2u(size+extra); assert(usize_max > 0 - && usize_max <= sc_data_global.large_maxclass); + && usize_max <= SC_LARGE_MAXCLASS); } else { usize_max = sz_sa2u(size+extra, alignment); if (unlikely(usize_max == 0 - || usize_max > sc_data_global.large_maxclass)) { + || usize_max > SC_LARGE_MAXCLASS)) { /* * usize_max is out of range, and chances are that * allocation will fail, but use the maximum possible * value and carry on with prof_alloc_prep(), just in * case allocation succeeds. */ - usize_max = sc_data_global.large_maxclass; + usize_max = SC_LARGE_MAXCLASS; } } tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); @@ -3046,18 +3046,18 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) { /* * The API explicitly absolves itself of protecting against (size + * extra) numerical overflow, but we may need to clamp extra to avoid - * exceeding sc_data_global.large_maxclass. + * exceeding SC_LARGE_MAXCLASS. * * Ordinarily, size limit checking is handled deeper down, but here we * have to check as part of (size + extra) clamping, since we need the * clamped value in the above helper functions. */ - if (unlikely(size > sc_data_global.large_maxclass)) { + if (unlikely(size > SC_LARGE_MAXCLASS)) { usize = old_usize; goto label_not_resized; } - if (unlikely(sc_data_global.large_maxclass - size < extra)) { - extra = sc_data_global.large_maxclass - size; + if (unlikely(SC_LARGE_MAXCLASS - size < extra)) { + extra = SC_LARGE_MAXCLASS - size; } if (config_prof && opt_prof) { @@ -3244,7 +3244,7 @@ je_nallocx(size_t size, int flags) { check_entry_exit_locking(tsdn); usize = inallocx(tsdn, size, flags); - if (unlikely(usize > sc_data_global.large_maxclass)) { + if (unlikely(usize > SC_LARGE_MAXCLASS)) { LOG("core.nallocx.exit", "result: %zu", ZU(0)); return 0; } diff --git a/src/large.c b/src/large.c index 87d9ec0b..84073618 100644 --- a/src/large.c +++ b/src/large.c @@ -28,7 +28,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, assert(!tsdn_null(tsdn) || arena != NULL); ausize = sz_sa2u(usize, alignment); - if (unlikely(ausize == 0 || ausize > sc_data_global.large_maxclass)) { + if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) { return NULL; } @@ -221,10 +221,10 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, size_t oldusize = extent_usize_get(extent); /* The following should have been caught by callers. */ - assert(usize_min > 0 && usize_max <= sc_data_global.large_maxclass); + assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ - assert(oldusize >= sc_data_global.large_minclass - && usize_max >= sc_data_global.large_minclass); + assert(oldusize >= SC_LARGE_MINCLASS + && usize_max >= SC_LARGE_MINCLASS); if (usize_max > oldusize) { /* Attempt to expand the allocation in-place. */ @@ -278,10 +278,10 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize, size_t oldusize = extent_usize_get(extent); /* The following should have been caught by callers. 
*/ - assert(usize > 0 && usize <= sc_data_global.large_maxclass); + assert(usize > 0 && usize <= SC_LARGE_MAXCLASS); /* Both allocation sizes must be large to avoid a move. */ - assert(oldusize >= sc_data_global.large_minclass - && usize >= sc_data_global.large_minclass); + assert(oldusize >= SC_LARGE_MINCLASS + && usize >= SC_LARGE_MINCLASS); /* Try to avoid moving the allocation. */ if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { diff --git a/src/sc.c b/src/sc.c index e8eef1c2..74c91018 100644 --- a/src/sc.c +++ b/src/sc.c @@ -228,6 +228,20 @@ size_classes( sc_data->lg_large_minclass = lg_large_minclass; sc_data->large_minclass = (ZU(1) << lg_large_minclass); sc_data->large_maxclass = large_maxclass; + + /* + * We compute these values in two ways: + * - Incrementally, as above. + * - In macros, in sc.h. + * The computation is easier when done incrementally, but putting it in + * a constant makes it available to the fast paths without having to + * touch the extra global cacheline. We assert, however, that the two + * computations are equivalent. + */ + assert(sc_data->small_maxclass == SC_SMALL_MAXCLASS); + assert(sc_data->large_minclass == SC_LARGE_MINCLASS); + assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS); + assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS); } void diff --git a/src/tcache.c b/src/tcache.c index edd047ab..7346df8c 100644 --- a/src/tcache.c +++ b/src/tcache.c @@ -658,8 +658,8 @@ bool tcache_boot(tsdn_t *tsdn) { /* If necessary, clamp opt_lg_tcache_max. */ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < - sc_data_global.small_maxclass) { - tcache_maxclass = sc_data_global.small_maxclass; + SC_SMALL_MAXCLASS) { + tcache_maxclass = SC_SMALL_MAXCLASS; } else { tcache_maxclass = (ZU(1) << opt_lg_tcache_max); } diff --git a/test/unit/junk.c b/test/unit/junk.c index 91c6e5b1..be8933a7 100644 --- a/test/unit/junk.c +++ b/test/unit/junk.c @@ -123,13 +123,13 @@ test_junk(size_t sz_min, size_t sz_max) { TEST_BEGIN(test_junk_small) { test_skip_if(!config_fill); - test_junk(1, sc_data_global.small_maxclass - 1); + test_junk(1, SC_SMALL_MAXCLASS - 1); } TEST_END TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); - test_junk(sc_data_global.small_maxclass + 1, + test_junk(SC_SMALL_MAXCLASS + 1, (1U << (sc_data_global.lg_large_minclass + 1))); } TEST_END diff --git a/test/unit/mallctl.c b/test/unit/mallctl.c index 230ecb0e..f6362008 100644 --- a/test/unit/mallctl.c +++ b/test/unit/mallctl.c @@ -721,7 +721,7 @@ TEST_BEGIN(test_arenas_lextent_constants) { } while (0) TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, - sc_data_global.large_minclass); + SC_LARGE_MINCLASS); #undef TEST_ARENAS_LEXTENT_CONSTANT } diff --git a/test/unit/rtree.c b/test/unit/rtree.c index 4d1daf2c..b017bc03 100644 --- a/test/unit/rtree.c +++ b/test/unit/rtree.c @@ -85,8 +85,8 @@ TEST_END TEST_BEGIN(test_rtree_extrema) { extent_t extent_a, extent_b; - extent_init(&extent_a, NULL, NULL, sc_data_global.large_minclass, false, - sz_size2index(sc_data_global.large_minclass), 0, + extent_init(&extent_a, NULL, NULL, SC_LARGE_MINCLASS, false, + sz_size2index(SC_LARGE_MINCLASS), 0, extent_state_active, false, false, true); extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0, extent_state_active, false, false, true); diff --git a/test/unit/stats.c b/test/unit/stats.c index 8fe0f3ad..b8f549be 100644 --- a/test/unit/stats.c +++ b/test/unit/stats.c @@ -33,7 +33,7 @@ TEST_BEGIN(test_stats_large) { size_t sz; int expected = config_stats ? 
0 : ENOENT; - p = mallocx(sc_data_global.small_maxclass + 1, MALLOCX_ARENA(0)); + p = mallocx(SC_SMALL_MAXCLASS + 1, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), @@ -74,7 +74,7 @@ TEST_BEGIN(test_stats_arenas_summary) { uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; - little = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0)); + little = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(little, "Unexpected mallocx() failure"); large = mallocx((1U << sc_data_global.lg_large_minclass), MALLOCX_ARENA(0)); @@ -149,7 +149,7 @@ TEST_BEGIN(test_stats_arenas_small) { no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ - p = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0)); + p = mallocx(SC_SMALL_MAXCLASS, MALLOCX_ARENA(0)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), diff --git a/test/unit/zero.c b/test/unit/zero.c index 20a70628..8b8d2072 100644 --- a/test/unit/zero.c +++ b/test/unit/zero.c @@ -41,13 +41,13 @@ test_zero(size_t sz_min, size_t sz_max) { TEST_BEGIN(test_zero_small) { test_skip_if(!config_fill); - test_zero(1, sc_data_global.small_maxclass - 1); + test_zero(1, SC_SMALL_MAXCLASS - 1); } TEST_END TEST_BEGIN(test_zero_large) { test_skip_if(!config_fill); - test_zero(sc_data_global.small_maxclass + 1, + test_zero(SC_SMALL_MAXCLASS + 1, 1U << (sc_data_global.lg_large_minclass + 1)); } TEST_END
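
For reference, the macro arithmetic added to sc.h can be sanity-checked outside the tree. The following is a minimal standalone sketch, not part of the patch, that recomputes the three constants under an assumed common configuration (LG_PAGE = 12, i.e. 4 KiB pages, SC_LG_NGROUP = 2, SC_PTR_BITS = 64); those parameters and this tiny program are illustrative assumptions, not taken from this diff.

/*
 * Standalone sanity check of the new sc.h constant definitions, with the
 * configuration parameters hard-coded as assumptions (4 KiB pages, group
 * size 4, 64-bit pointers). The real definitions live in
 * include/jemalloc/internal/sc.h.
 */
#include <stdio.h>

#define LG_PAGE		12	/* assumed: 4 KiB pages */
#define SC_LG_NGROUP	2	/* assumed: 4 size classes per group */
#define SC_NGROUP	((size_t)1 << SC_LG_NGROUP)
#define SC_PTR_BITS	64	/* assumed: 64-bit pointers */

#define SC_SMALL_MAX_BASE	((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
#define SC_SMALL_MAX_DELTA	((size_t)1 << (LG_PAGE - 1))
#define SC_SMALL_MAXCLASS	(SC_SMALL_MAX_BASE			\
	+ (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)

#define SC_LARGE_MINCLASS	((size_t)1 << (LG_PAGE + SC_LG_NGROUP))

#define SC_MAX_BASE		((size_t)1 << (SC_PTR_BITS - 2))
#define SC_MAX_DELTA		((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
#define SC_LARGE_MAXCLASS	(SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)

int
main(void) {
	/* 8192 + 3 * 2048 = 14336 (14 KiB): largest slab-backed class. */
	printf("SC_SMALL_MAXCLASS = %zu\n", (size_t)SC_SMALL_MAXCLASS);
	/* 1 << 14 = 16384 (16 KiB): smallest class not backed by a slab. */
	printf("SC_LARGE_MINCLASS = %zu\n", (size_t)SC_LARGE_MINCLASS);
	/* 2^62 + 3 * 2^60 = 0x7000000000000000: largest supported class. */
	printf("SC_LARGE_MAXCLASS = %#zx\n", (size_t)SC_LARGE_MAXCLASS);
	return 0;
}

Under those assumptions this prints SC_SMALL_MAXCLASS = 14336 (14 KiB), SC_LARGE_MINCLASS = 16384 (16 KiB), and SC_LARGE_MAXCLASS = 0x7000000000000000, which should match what the incremental computation stored in sc_data_global produces on such a build; the new asserts added to sc.c verify exactly that equivalence at boot, so the fast paths can read the constants without touching the extra global cache line.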