SC: Make some key size classes static.

The largest small class, smallest large class, and largest large class may all
be needed down fast paths; to avoid the risk of touching another cache line, we
can make them available as constants.
Author: David Goldblatt (2018-07-11 16:05:58 -07:00)
Committer: David Goldblatt
parent 5112d9e5fd
commit 55e5cc1341
18 changed files with 129 additions and 96 deletions
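The point of the constants is that the old reads went through sc_data_global, a runtime-initialized struct, so every fast-path bounds check cost a load from memory the path might not otherwise touch. A minimal sketch of the difference (the two helper functions are hypothetical; only sc_data_global and SC_SMALL_MAXCLASS come from jemalloc):

/* Before: the bound is loaded from a global struct at runtime; the
 * comparison can pull in a cache line the fast path never otherwise
 * needs. */
static inline bool
is_small_dynamic(size_t size) {
	return size <= sc_data_global.small_maxclass;
}

/* After: SC_SMALL_MAXCLASS is a preprocessor constant, so the bound
 * folds into an immediate operand and no extra memory is touched. */
static inline bool
is_small_static(size_t size) {
	return size <= SC_SMALL_MAXCLASS;
}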


@@ -111,7 +111,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
 	assert(size != 0);
 
 	if (likely(tcache != NULL)) {
-		if (likely(size <= sc_data_global.small_maxclass)) {
+		if (likely(size <= SC_SMALL_MAXCLASS)) {
 			return tcache_alloc_small(tsdn_tsd(tsdn), arena,
 			    tcache, size, ind, zero, slow_path);
 		}
@@ -263,7 +263,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
 static inline void
 arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
 	assert(ptr != NULL);
-	assert(size <= sc_data_global.large_maxclass);
+	assert(size <= SC_LARGE_MAXCLASS);
 
 	szind_t szind;
 	bool slab;
@@ -309,7 +309,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
     alloc_ctx_t *alloc_ctx, bool slow_path) {
 	assert(!tsdn_null(tsdn) || tcache == NULL);
 	assert(ptr != NULL);
-	assert(size <= sc_data_global.large_maxclass);
+	assert(size <= SC_LARGE_MAXCLASS);
 
 	if (unlikely(tcache == NULL)) {
 		arena_sdalloc_no_tcache(tsdn, ptr, size);


@@ -142,7 +142,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
 	size_t usize, copysize;
 
 	usize = sz_sa2u(size, alignment);
-	if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
+	if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
		return NULL;
 	}
 	p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);


@@ -57,15 +57,15 @@ prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
 #ifdef JEMALLOC_ATOMIC_U64
 	a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
 	do {
-		a1 = (a0 >= sc_data_global.large_minclass - usize)
-		    ? a0 - (sc_data_global.large_minclass - usize) : 0;
+		a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+		    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
 	} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
 	    a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
 #else
 	malloc_mutex_lock(tsdn, &prof_accum->mtx);
 	a0 = prof_accum->accumbytes;
-	a1 = (a0 >= sc_data_global.large_minclass - usize)
-	    ? a0 - (sc_data_global.large_minclass - usize) : 0;
+	a1 = (a0 >= SC_LARGE_MINCLASS - usize)
+	    ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
 	prof_accum->accumbytes = a1;
 	malloc_mutex_unlock(tsdn, &prof_accum->mtx);
 #endif
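The loop above is a saturating subtraction done with a weak compare-exchange: it recomputes a1 from the freshly observed a0 until the swap succeeds. A standalone sketch of the same pattern using C11 atomics instead of jemalloc's atomic_*_u64 wrappers (the function name is hypothetical; in the hunk above, delta corresponds to SC_LARGE_MINCLASS - usize):

#include <stdatomic.h>
#include <stdint.h>

/* Atomically compute *counter = max(*counter - delta, 0). */
static void
saturating_sub_u64(_Atomic uint64_t *counter, uint64_t delta) {
	uint64_t old = atomic_load_explicit(counter, memory_order_relaxed);
	uint64_t new;
	do {
		new = (old >= delta) ? old - delta : 0;
	} while (!atomic_compare_exchange_weak_explicit(counter, &old,
	    new, memory_order_relaxed, memory_order_relaxed));
}

atomic_compare_exchange_weak_explicit rewrites old with the current value on failure, so the loop needs no explicit reload.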


@@ -238,6 +238,25 @@
 /* The largest size class in the lookup table. */
 #define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
 
+/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
+#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
+#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
+
+/* The largest size class allocated out of a slab. */
+#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+    + (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
+
+/* The smallest size class not allocated out of a slab. */
+#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
+#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
+
+/* Internal; only used for the definition of SC_LARGE_MAXCLASS. */
+#define SC_MAX_BASE ((size_t)1 << (SC_PTR_BITS - 2))
+#define SC_MAX_DELTA ((size_t)1 << (SC_PTR_BITS - 2 - SC_LG_NGROUP))
+
+/* The largest size class supported. */
+#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
+
 typedef struct sc_s sc_t;
 struct sc_s {
 	/* Size class index, or -1 if not a valid size class. */

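Plugging in a common configuration makes the new macros concrete. A worked sketch assuming LG_PAGE == 12 (4 KiB pages), SC_LG_NGROUP == 2 (so SC_NGROUP == 4), and SC_PTR_BITS == 64; other configurations yield different values:

/* SC_SMALL_MAX_BASE  = 1 << 13         = 8192
 * SC_SMALL_MAX_DELTA = 1 << 11         = 2048
 * SC_SMALL_MAXCLASS  = 8192 + 3 * 2048 = 14336 (14 KiB)
 * SC_LARGE_MINCLASS  = 1 << 14         = 16384 (16 KiB)
 * SC_LARGE_MAXCLASS  = 2^62 + 3 * 2^60 = 7 * 2^60
 */
_Static_assert(SC_SMALL_MAXCLASS == 14336, "largest slab-backed class");
_Static_assert(SC_LARGE_MINCLASS == 16384, "smallest large class");
_Static_assert(SC_LARGE_MAXCLASS == ((size_t)7 << 60), "largest class");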

@@ -51,7 +51,7 @@ extern void sz_boot(const sc_data_t *sc_data);
 
 JEMALLOC_ALWAYS_INLINE pszind_t
 sz_psz2ind(size_t psz) {
-	if (unlikely(psz > sc_data_global.large_maxclass)) {
+	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
 		return sc_data_global.npsizes;
 	}
 	pszind_t x = lg_floor((psz<<1)-1);
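The lg_floor((psz<<1)-1) idiom above computes ceil(log2(psz)) for psz >= 1: doubling and subtracting one pushes any non-power-of-two past the next power of two before the floor is taken. A standalone sketch using the GCC/Clang __builtin_clzll builtin (jemalloc's lg_floor has its own platform-specific implementations; the helper name here is hypothetical):

#include <stddef.h>

/* floor(log2(2x - 1)) == ceil(log2(x)) for x >= 1. */
static unsigned
lg_ceil(size_t x) {
	return 63 - (unsigned)__builtin_clzll(
	    ((unsigned long long)x << 1) - 1);
}

/* E.g. lg_ceil(4096) == 12, lg_ceil(4097) == 13, lg_ceil(1) == 0. */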
@@ -73,7 +73,7 @@ sz_psz2ind(size_t psz) {
 static inline size_t
 sz_pind2sz_compute(pszind_t pind) {
 	if (unlikely(pind == sc_data_global.npsizes)) {
-		return sc_data_global.large_maxclass + PAGE;
+		return SC_LARGE_MAXCLASS + PAGE;
 	}
 	size_t grp = pind >> SC_LG_NGROUP;
 	size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
@@ -105,8 +105,8 @@ sz_pind2sz(pszind_t pind) {
 
 static inline size_t
 sz_psz2u(size_t psz) {
-	if (unlikely(psz > sc_data_global.large_maxclass)) {
-		return sc_data_global.large_maxclass + PAGE;
+	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
+		return SC_LARGE_MAXCLASS + PAGE;
 	}
 	size_t x = lg_floor((psz<<1)-1);
 	size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
@@ -119,7 +119,7 @@ sz_psz2u(size_t psz) {
 
 static inline szind_t
 sz_size2index_compute(size_t size) {
-	if (unlikely(size > sc_data_global.large_maxclass)) {
+	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		return SC_NSIZES;
 	}
 #if (SC_NTINY != 0)
@@ -207,7 +207,7 @@ sz_index2size(szind_t index) {
 
 JEMALLOC_ALWAYS_INLINE size_t
 sz_s2u_compute(size_t size) {
-	if (unlikely(size > sc_data_global.large_maxclass)) {
+	if (unlikely(size > SC_LARGE_MAXCLASS)) {
 		return 0;
 	}
 #if (SC_NTINY > 0)
@@ -262,7 +262,7 @@ sz_sa2u(size_t size, size_t alignment) {
 	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
 
 	/* Try for a small size class. */
-	if (size <= sc_data_global.small_maxclass && alignment < PAGE) {
+	if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
 		/*
 		 * Round size up to the nearest multiple of alignment.
 		 *
@@ -278,20 +278,20 @@ sz_sa2u(size_t size, size_t alignment) {
 		 *   192 | 11000000 |  64
 		 */
 		usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
-		if (usize < sc_data_global.large_minclass) {
+		if (usize < SC_LARGE_MINCLASS) {
 			return usize;
 		}
 	}
 
 	/* Large size class. Beware of overflow. */
-	if (unlikely(alignment > sc_data_global.large_maxclass)) {
+	if (unlikely(alignment > SC_LARGE_MAXCLASS)) {
 		return 0;
 	}
 
 	/* Make sure result is a large size class. */
-	if (size <= sc_data_global.large_minclass) {
-		usize = sc_data_global.large_minclass;
+	if (size <= SC_LARGE_MINCLASS) {
+		usize = SC_LARGE_MINCLASS;
 	} else {
 		usize = sz_s2u(size);
 		if (usize < size) {

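ALIGNMENT_CEILING in the sz_sa2u hunk rounds size up to a multiple of a power-of-two alignment with the usual mask trick, which is what the comment table illustrates. A standalone sketch (the function name is hypothetical; jemalloc's macro is the equivalent expression):

#include <stddef.h>

/* Round size up to the next multiple of alignment; alignment must be
 * a power of two. */
static size_t
alignment_ceiling(size_t size, size_t alignment) {
	return (size + alignment - 1) & ~(alignment - 1);
}

/* E.g. alignment_ceiling(97, 32) == 128; sz_s2u() then rounds that up
 * to the containing size class. */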

@@ -167,7 +167,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	cache_bin_info_t *bin_info;
 
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
-	    <= sc_data_global.small_maxclass);
+	    <= SC_SMALL_MAXCLASS);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free)) {
 		arena_dalloc_junk_small(ptr, &bin_infos[binind]);
@@ -193,7 +193,7 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
 	cache_bin_info_t *bin_info;
 
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
-	    > sc_data_global.small_maxclass);
+	    > SC_SMALL_MAXCLASS);
 	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
 
 	if (slow_path && config_fill && unlikely(opt_junk_free)) {
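The two asserts make the partition explicit: tcache_dalloc_small handles sizes up to SC_SMALL_MAXCLASS, and tcache_dalloc_large handles everything above it up to tcache_maxclass. A hypothetical dispatcher (not jemalloc API; a sketch assuming the signatures shown in the hunks above) would split on the same constant:

/* Sketch: route a deallocation by usable size. */
static inline void
tcache_dalloc_sketch(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, size_t usize, bool slow_path) {
	if (usize <= SC_SMALL_MAXCLASS) {
		tcache_dalloc_small(tsd, tcache, ptr, binind, slow_path);
	} else {
		tcache_dalloc_large(tsd, tcache, ptr, binind, slow_path);
	}
}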