Hide size class computation behind a layer of indirection.

This commit removes almost all the dependencies on size_classes.h, accessing the
data there only via the new module sc.h, which does not depend on any
configuration options.

In a subsequent commit, we'll remove the configure-time size class computations
and do them at boot time instead.
David Goldblatt 2017-12-14 12:46:39 -08:00 committed by David Goldblatt
parent fb924dd7bf
commit e904f813b4
46 changed files with 886 additions and 459 deletions
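The pattern applied throughout the diff below is mechanical: call sites stop using the generated size_classes.h macros and instead use either the configuration-independent SC_* constants or the runtime fields of sc_data_global declared in sc.h. A minimal editorial sketch of the before/after shape (the identifiers come from the diff itself; the wrapper functions are illustrative only and not part of the commit):

static bool
is_small_old(size_t size) {
	/* Old: compile-time constant generated by size_classes.sh. */
	return size <= SMALL_MAXCLASS;
}

static bool
is_small_new(size_t size) {
	/* New: field of the global sc_data_t, filled in at boot. */
	return size <= sc_data_global.small_maxclass;
}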


@ -114,6 +114,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/prof.c \
$(srcroot)src/rtree.c \
$(srcroot)src/stats.c \
$(srcroot)src/sc.c \
$(srcroot)src/sz.c \
$(srcroot)src/tcache.c \
$(srcroot)src/test_hooks.c \


@ -5,7 +5,6 @@
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/stats.h"
extern ssize_t opt_dirty_decay_ms;


@ -4,7 +4,7 @@
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
@ -111,7 +111,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
if (likely(size <= sc_data_global.small_maxclass)) {
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
}
@ -140,7 +140,7 @@ arena_salloc(tsdn_t *tsdn, const void *ptr) {
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != NSIZES);
assert(szind != SC_NSIZES);
return sz_index2size(szind);
}
@ -173,7 +173,7 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
/* Only slab members should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
assert(szind != NSIZES);
assert(szind != SC_NSIZES);
return sz_index2size(szind);
}
@ -194,7 +194,7 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
}
@ -224,7 +224,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
if (alloc_ctx != NULL) {
szind = alloc_ctx->szind;
slab = alloc_ctx->slab;
assert(szind != NSIZES);
assert(szind != SC_NSIZES);
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
@ -236,7 +236,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == extent_szind_get(extent));
assert(szind < NSIZES);
assert(szind < SC_NSIZES);
assert(slab == extent_slab_get(extent));
}
@ -246,7 +246,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {
@ -263,7 +263,7 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
static inline void
arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
assert(size <= sc_data_global.large_maxclass);
szind_t szind;
bool slab;
@ -273,7 +273,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
slab = (szind < SC_NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
@ -285,7 +285,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
(uintptr_t)ptr, true, &szind, &slab);
assert(szind == sz_size2index(size));
assert((config_prof && opt_prof) || slab == (szind < NBINS));
assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
if (config_debug) {
extent_t *extent = rtree_extent_read(tsdn,
@ -309,7 +309,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
alloc_ctx_t *alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= LARGE_MAXCLASS);
assert(size <= sc_data_global.large_maxclass);
if (unlikely(tcache == NULL)) {
arena_sdalloc_no_tcache(tsdn, ptr, size);
@ -339,7 +339,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
* object, so base szind and slab on the given size.
*/
szind = sz_size2index(size);
slab = (szind < NBINS);
slab = (szind < SC_NBINS);
}
if (config_debug) {
@ -358,7 +358,7 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
slow_path);
} else {
if (szind < nhbins) {
if (config_prof && unlikely(szind < NBINS)) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, tcache,
slow_path);
} else {


@ -4,7 +4,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
@ -90,7 +90,7 @@ struct arena_stats_s {
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
/* One element for each large size class. */
arena_stats_large_t lstats[NSIZES - NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
/* Arena uptime. */
nstime_t uptime;
@ -225,7 +225,7 @@ arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
arena_stats_lock(tsdn, arena_stats);
arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
NBINS].nrequests, nrequests);
SC_NBINS].nrequests, nrequests);
arena_stats_unlock(tsdn, arena_stats);
}


@ -10,7 +10,7 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/ticker.h"
@ -203,7 +203,7 @@ struct arena_s {
*
* Synchronization: internal.
*/
bin_t bins[NBINS];
bin_t bins[SC_NBINS];
/*
* Base allocator, from which arena metadata are allocated.


@ -1,8 +1,10 @@
#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H
#define JEMALLOC_INTERNAL_ARENA_TYPES_H
#include "jemalloc/internal/sc.h"
/* Maximum number of regions in one slab. */
#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
/* Default decay times in milliseconds. */


@ -3,7 +3,7 @@
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
/* Embedded at the beginning of every block of base-managed virtual memory. */
struct base_block_s {
@ -46,7 +46,7 @@ struct base_s {
base_block_t *blocks;
/* Heap of extents that track unused trailing space within blocks. */
extent_heap_t avail[NSIZES];
extent_heap_t avail[SC_NSIZES];
/* Stats, only maintained if config_stats. */
size_t allocated;


@ -1,10 +1,11 @@
#ifndef JEMALLOC_INTERNAL_BIN_H
#define JEMALLOC_INTERNAL_BIN_H
#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/extent_types.h"
#include "jemalloc/internal/extent_structs.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/sc.h"
/*
* A bin contains a set of extents that are currently being used for slab
@ -48,7 +49,7 @@ struct bin_info_s {
bitmap_info_t bitmap_info;
};
extern const bin_info_t bin_infos[NBINS];
extern bin_info_t bin_infos[SC_NBINS];
typedef struct bin_s bin_t;
@ -78,6 +79,9 @@ struct bin_s {
bin_stats_t stats;
};
void bin_infos_init(sc_data_t *sc_data, bin_info_t bin_infos[SC_NBINS]);
void bin_boot();
/* Initializes a bin to empty. Returns true on error. */
bool bin_init(bin_t *bin);


@ -162,4 +162,72 @@ lg_floor(size_t x) {
#undef BIT_UTIL_INLINE
/* A compile-time version of lg_ceil */
#define LG_CEIL(x) ( \
(x) <= (1ULL << 0ULL) ? 0 : \
(x) <= (1ULL << 1ULL) ? 1 : \
(x) <= (1ULL << 2ULL) ? 2 : \
(x) <= (1ULL << 3ULL) ? 3 : \
(x) <= (1ULL << 4ULL) ? 4 : \
(x) <= (1ULL << 5ULL) ? 5 : \
(x) <= (1ULL << 6ULL) ? 6 : \
(x) <= (1ULL << 7ULL) ? 7 : \
(x) <= (1ULL << 8ULL) ? 8 : \
(x) <= (1ULL << 9ULL) ? 9 : \
(x) <= (1ULL << 10ULL) ? 10 : \
(x) <= (1ULL << 11ULL) ? 11 : \
(x) <= (1ULL << 12ULL) ? 12 : \
(x) <= (1ULL << 13ULL) ? 13 : \
(x) <= (1ULL << 14ULL) ? 14 : \
(x) <= (1ULL << 15ULL) ? 15 : \
(x) <= (1ULL << 16ULL) ? 16 : \
(x) <= (1ULL << 17ULL) ? 17 : \
(x) <= (1ULL << 18ULL) ? 18 : \
(x) <= (1ULL << 19ULL) ? 19 : \
(x) <= (1ULL << 20ULL) ? 20 : \
(x) <= (1ULL << 21ULL) ? 21 : \
(x) <= (1ULL << 22ULL) ? 22 : \
(x) <= (1ULL << 23ULL) ? 23 : \
(x) <= (1ULL << 24ULL) ? 24 : \
(x) <= (1ULL << 25ULL) ? 25 : \
(x) <= (1ULL << 26ULL) ? 26 : \
(x) <= (1ULL << 27ULL) ? 27 : \
(x) <= (1ULL << 28ULL) ? 28 : \
(x) <= (1ULL << 29ULL) ? 29 : \
(x) <= (1ULL << 30ULL) ? 30 : \
(x) <= (1ULL << 31ULL) ? 31 : \
(x) <= (1ULL << 32ULL) ? 32 : \
(x) <= (1ULL << 33ULL) ? 33 : \
(x) <= (1ULL << 34ULL) ? 34 : \
(x) <= (1ULL << 35ULL) ? 35 : \
(x) <= (1ULL << 36ULL) ? 36 : \
(x) <= (1ULL << 37ULL) ? 37 : \
(x) <= (1ULL << 38ULL) ? 38 : \
(x) <= (1ULL << 39ULL) ? 39 : \
(x) <= (1ULL << 40ULL) ? 40 : \
(x) <= (1ULL << 41ULL) ? 41 : \
(x) <= (1ULL << 42ULL) ? 42 : \
(x) <= (1ULL << 43ULL) ? 43 : \
(x) <= (1ULL << 44ULL) ? 44 : \
(x) <= (1ULL << 45ULL) ? 45 : \
(x) <= (1ULL << 46ULL) ? 46 : \
(x) <= (1ULL << 47ULL) ? 47 : \
(x) <= (1ULL << 48ULL) ? 48 : \
(x) <= (1ULL << 49ULL) ? 49 : \
(x) <= (1ULL << 50ULL) ? 50 : \
(x) <= (1ULL << 51ULL) ? 51 : \
(x) <= (1ULL << 52ULL) ? 52 : \
(x) <= (1ULL << 53ULL) ? 53 : \
(x) <= (1ULL << 54ULL) ? 54 : \
(x) <= (1ULL << 55ULL) ? 55 : \
(x) <= (1ULL << 56ULL) ? 56 : \
(x) <= (1ULL << 57ULL) ? 57 : \
(x) <= (1ULL << 58ULL) ? 58 : \
(x) <= (1ULL << 59ULL) ? 59 : \
(x) <= (1ULL << 60ULL) ? 60 : \
(x) <= (1ULL << 61ULL) ? 61 : \
(x) <= (1ULL << 62ULL) ? 62 : \
(x) <= (1ULL << 63ULL) ? 63 : \
64)
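Editorial note: LG_CEIL is a compile-time lg-ceiling; in the rest of this commit, LG_CEIL(SC_NSIZES) stands in wherever the generated LG_CEIL_NSIZES constant used to appear (rtree.h, bitmap.h, extent_structs.h).

/* For example, LG_CEIL(100) == 7, LG_CEIL(128) == 7, and LG_CEIL(129) == 8. */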
#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */


@ -3,18 +3,18 @@
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES
#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
#endif
#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS)
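To make the new bound concrete, an editorial sketch assuming LG_PAGE == 12, SC_LG_TINY_MIN == 3, and a 64-bit configuration where SC_NSIZES evaluates to 232:

/*
 * LG_SLAB_MAXREGS == LG_PAGE - SC_LG_TINY_MIN == 9 and LG_CEIL(232) == 8,
 * so LG_BITMAP_MAXBITS == 9 and BITMAP_MAXBITS == 512.
 */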


@ -5,7 +5,7 @@
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex_prof.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/stats.h"
/* Maximum ctl tree depth. */
@ -40,8 +40,8 @@ typedef struct ctl_arena_stats_s {
uint64_t ndalloc_small;
uint64_t nrequests_small;
bin_stats_t bstats[NBINS];
arena_stats_large_t lstats[NSIZES - NBINS];
bin_stats_t bstats[SC_NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
} ctl_arena_stats_t;
typedef struct ctl_stats_s {


@ -6,6 +6,7 @@
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
static inline void
@ -53,14 +54,14 @@ static inline szind_t
extent_szind_get_maybe_invalid(const extent_t *extent) {
szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
EXTENT_BITS_SZIND_SHIFT);
assert(szind <= NSIZES);
assert(szind <= SC_NSIZES);
return szind;
}
static inline szind_t
extent_szind_get(const extent_t *extent) {
szind_t szind = extent_szind_get_maybe_invalid(extent);
assert(szind < NSIZES); /* Never call when "invalid". */
assert(szind < SC_NSIZES); /* Never call when "invalid". */
return szind;
}
@ -234,7 +235,7 @@ extent_bsize_set(extent_t *extent, size_t bsize) {
static inline void
extent_szind_set(extent_t *extent, szind_t szind) {
assert(szind <= NSIZES); /* NSIZES means "invalid". */
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
}
@ -327,7 +328,7 @@ extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
extent_addr_set(extent, addr);
extent_bsize_set(extent, bsize);
extent_slab_set(extent, false);
extent_szind_set(extent, NSIZES);
extent_szind_set(extent, SC_NSIZES);
extent_sn_set(extent, sn);
extent_state_set(extent, extent_state_active);
extent_zeroed_set(extent, true);


@ -2,11 +2,12 @@
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
typedef enum {
extent_state_active = 0,
@ -112,7 +113,7 @@ struct extent_s {
#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
@ -180,14 +181,14 @@ struct extents_s {
*
* Synchronization: mtx.
*/
extent_heap_t heaps[NPSIZES+1];
extent_heap_t heaps[SC_NPSIZES_MAX + 1];
/*
* Bitmap for which set bits correspond to non-empty heaps.
*
* Synchronization: mtx.
*/
bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)];
bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES_MAX + 1)];
/*
* LRU of all extents in heaps.


@ -6,8 +6,6 @@ typedef struct extents_s extents_t;
#define EXTENT_HOOKS_INITIALIZER NULL
#define EXTENT_GROW_MAX_PIND (NPSIZES - 1)
/*
* When reuse (and split) an active extent, (1U << opt_lg_extent_max_active_fit)
* is the max ratio between the size of the active extent and the new extent.


@ -2,7 +2,6 @@
#define JEMALLOC_INTERNAL_EXTERNS_H
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/tsd_types.h"
/* TSD checks this to set thread local slow state accordingly. */


@ -4,7 +4,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"
JEMALLOC_ALWAYS_INLINE malloc_cpuid_t
@ -108,14 +108,14 @@ decay_ticker_get(tsd_t *tsd, unsigned ind) {
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind < NBINS);
assert(binind < SC_NBINS);
return &tcache->bins_small[binind];
}
JEMALLOC_ALWAYS_INLINE cache_bin_t *
tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
assert(binind >= NBINS && binind < nhbins);
return &tcache->bins_large[binind - NBINS];
assert(binind >= SC_NBINS && binind < nhbins);
return &tcache->bins_large[binind - SC_NBINS];
}
JEMALLOC_ALWAYS_INLINE bool


@ -142,7 +142,7 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t usize, copysize;
usize = sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
return NULL;
}
p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);


@ -57,15 +57,15 @@ prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
#ifdef JEMALLOC_ATOMIC_U64
a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
do {
a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS -
usize) : 0;
a1 = (a0 >= sc_data_global.large_minclass - usize)
? a0 - (sc_data_global.large_minclass - usize) : 0;
} while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
#else
malloc_mutex_lock(tsdn, &prof_accum->mtx);
a0 = prof_accum->accumbytes;
a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) :
0;
a1 = (a0 >= sc_data_global.large_minclass - usize)
? a0 - (sc_data_global.large_minclass - usize) : 0;
prof_accum->accumbytes = a1;
malloc_mutex_unlock(tsdn, &prof_accum->mtx);
#endif


@ -4,7 +4,7 @@
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree_tsd.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/tsd.h"
/*
@ -31,7 +31,7 @@
# error Unsupported number of significant virtual address bits
#endif
/* Use compact leaf representation if virtual address encoding allows. */
#if RTREE_NHIB >= LG_CEIL_NSIZES
#if RTREE_NHIB >= LG_CEIL(SC_NSIZES)
# define RTREE_LEAF_COMPACT
#endif
@ -261,7 +261,7 @@ rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
static inline void
rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, szind_t szind) {
assert(szind <= NSIZES);
assert(szind <= SC_NSIZES);
#ifdef RTREE_LEAF_COMPACT
uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
@ -313,7 +313,7 @@ rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
static inline void
rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
assert(!slab || szind < SC_NBINS);
/*
* The caller implicitly assures that it is the only writer to the szind
@ -429,7 +429,7 @@ rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
dependent);
if (!dependent && elm == NULL) {
return NSIZES;
return SC_NSIZES;
}
return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
}
@ -474,7 +474,7 @@ rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
static inline void
rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key, szind_t szind, bool slab) {
assert(!slab || szind < NBINS);
assert(!slab || szind < SC_NBINS);
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
@ -486,7 +486,7 @@ rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
NULL);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false);
rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */


@ -0,0 +1,302 @@
#ifndef JEMALLOC_INTERNAL_SC_H
#define JEMALLOC_INTERNAL_SC_H
#include "jemalloc/internal/jemalloc_internal_types.h"
/*
* Size class computations:
*
* These are a little tricky; we'll first start by describing how things
* generally work, and then describe some of the details.
*
* Ignore the first few size classes for a moment. We can then split all the
* remaining size classes into groups. The size classes in a group are spaced
* such that they cover allocation request sizes in a power-of-2 range. The
* power of two is called the base of the group, and the size classes in it
* satisfy allocations in the half-open range (base, base * 2]. There are
* SC_NGROUP size classes in each group, equally spaced in the range, so that
* each one covers allocations for base / SC_NGROUP possible allocation sizes.
* We call that value (base / SC_NGROUP) the delta of the group. Each size class
* is delta larger than the one before it (including the initial size class in a
* group, which is delta larger than base, the largest size class in the
* previous group).
* To make the math all work out nicely, we require that SC_NGROUP is a power of
* two, and define it in terms of SC_LG_NGROUP. We'll often talk in terms of
* lg_base and lg_delta. For each of these groups then, we have that
* lg_delta == lg_base - SC_LG_NGROUP.
* The size classes in a group with a given lg_base and lg_delta (which, recall,
* can be computed from lg_base for these groups) are therefore:
* base + 1 * delta
* which covers allocations in (base, base + 1 * delta]
* base + 2 * delta
* which covers allocations in (base + 1 * delta, base + 2 * delta].
* base + 3 * delta
* which covers allocations in (base + 2 * delta, base + 3 * delta].
* ...
* base + SC_NGROUP * delta ( == 2 * base)
* which covers allocations in (base + (SC_NGROUP - 1) * delta, 2 * base].
* (Note that currently SC_NGROUP is always 4, so the "..." is empty in
* practice.)
* Note that the last size class in the group is the next power of two (after
* base), so that we've set up the induction correctly for the next group's
* selection of delta.
*
* Now, let's start considering the first few size classes. Two extra constants
* come into play here: LG_QUANTUM and SC_LG_TINY_MIN. LG_QUANTUM ensures
* correct platform alignment; all objects of size (1 << LG_QUANTUM) or larger
* are at least (1 << LG_QUANTUM) aligned; this can be used to ensure that we
* never return improperly aligned memory, by making (1 << LG_QUANTUM) equal the
* highest required alignment of a platform. For allocation sizes smaller than
* (1 << LG_QUANTUM) though, we can be more relaxed (since we don't support
* platforms with types with alignment larger than their size). To allow such
* allocations (without wasting space unnecessarily), we introduce tiny size
* classes; one per power of two, up until we hit the quantum size. There are
* therefore LG_QUANTUM - SC_LG_TINY_MIN such size classes.
*
* Next, we have a size class of size LG_QUANTUM. This can't be the start of a
* group in the sense we described above (covering a power of two range) since,
* if we divided into it to pick a value of delta, we'd get a delta smaller than
* (1 << LG_QUANTUM) for sizes >= (1 << LG_QUANTUM), which is against the rules.
*
* The first base we can divide by SC_NGROUP while still being at least
* (1 << LG_QUANTUM) is SC_NGROUP * (1 << LG_QUANTUM). We can get there by
* having SC_NGROUP size classes, spaced (1 << LG_QUANTUM) apart. These size
* classes are:
* 1 * (1 << LG_QUANTUM)
* 2 * (1 << LG_QUANTUM)
* 3 * (1 << LG_QUANTUM)
* ... (although, as above, this "..." is empty in practice)
* SC_NGROUP * (1 << LG_QUANTUM).
*
* There are SC_NGROUP of these size classes, so we can regard it as a sort of
* pseudo-group, even though it spans multiple powers of 2, is divided
* differently, and both starts and ends on a power of 2 (as opposed to just
* ending). SC_NGROUP is itself a power of two, so the first group after the
* pseudo-group has the power-of-two base SC_NGROUP * (1 << LG_QUANTUM), for a
* lg_base of LG_QUANTUM + SC_LG_NGROUP. We can divide this base into SC_NGROUP
* sizes without violating our LG_QUANTUM requirements, so we can safely set
* lg_delta = lg_base - SC_LG_NGROUP (== LG_QUANTUM).
*
* So, in order, the size classes are:
*
* Tiny size classes:
* - Count: LG_QUANTUM - SC_LG_TINY_MIN.
* - Sizes:
* 1 << SC_LG_TINY_MIN
* 1 << (SC_LG_TINY_MIN + 1)
* 1 << (SC_LG_TINY_MIN + 2)
* ...
* 1 << (LG_QUANTUM - 1)
*
* Initial pseudo-group:
* - Count: SC_NGROUP
* - Sizes:
* 1 * (1 << LG_QUANTUM)
* 2 * (1 << LG_QUANTUM)
* 3 * (1 << LG_QUANTUM)
* ...
* SC_NGROUP * (1 << LG_QUANTUM)
*
* Regular group 0:
* - Count: SC_NGROUP
* - Sizes:
* (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP and lg_delta of
* lg_base - SC_LG_NGROUP)
* (1 << lg_base) + 1 * (1 << lg_delta)
* (1 << lg_base) + 2 * (1 << lg_delta)
* (1 << lg_base) + 3 * (1 << lg_delta)
* ...
* (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
*
* Regular group 1:
* - Count: SC_NGROUP
* - Sizes:
* (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + 1 and lg_delta of
* lg_base - SC_LG_NGROUP)
* (1 << lg_base) + 1 * (1 << lg_delta)
* (1 << lg_base) + 2 * (1 << lg_delta)
* (1 << lg_base) + 3 * (1 << lg_delta)
* ...
* (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
*
* ...
*
* Regular group N:
* - Count: SC_NGROUP
* - Sizes:
* (relative to lg_base of LG_QUANTUM + SC_LG_NGROUP + N and lg_delta of
* lg_base - SC_LG_NGROUP)
* (1 << lg_base) + 1 * (1 << lg_delta)
* (1 << lg_base) + 2 * (1 << lg_delta)
* (1 << lg_base) + 3 * (1 << lg_delta)
* ...
* (1 << lg_base) + SC_NGROUP * (1 << lg_delta) [ == (1 << (lg_base + 1)) ]
*
*
* Representation of metadata:
* To make the math easy, we'll mostly work in lg quantities. We record lg_base,
* lg_delta, and ndelta (i.e. number of deltas above the base) on a
* per-size-class basis, and maintain the invariant that, across all size
* classes, size == (1 << lg_base) + ndelta * (1 << lg_delta).
*
* For regular groups (i.e. those with lg_base >= LG_QUANTUM + SC_LG_NGROUP),
* lg_delta is lg_base - SC_LG_NGROUP, and ndelta goes from 1 to SC_NGROUP.
*
* For the initial tiny size classes (if any), lg_base is lg(size class size).
* lg_delta is lg_base for the first size class, and lg_base - 1 for all
* subsequent ones. ndelta is always 0.
*
* For the pseudo-group, if there are no tiny size classes, then we set
* lg_base == LG_QUANTUM, lg_delta == LG_QUANTUM, and have ndelta range from 0
* to SC_NGROUP - 1. (Note that delta == base, so base + (SC_NGROUP - 1) * delta
* is just SC_NGROUP * base, or (1 << (SC_LG_NGROUP + LG_QUANTUM)), so we do
* indeed get a power of two that way). If there *are* tiny size classes, then
* the first size class needs to have lg_delta relative to the largest tiny size
* class. We therefore set lg_base == LG_QUANTUM - 1,
* lg_delta == LG_QUANTUM - 1, and ndelta == 1, keeping the rest of the
* pseudo-group the same.
*
*
* Other terminology:
* "Small" size classes mean those that are allocated out of bins, which is the
* same as those that are slab allocated.
* "Large" size classes are those that are not small. The cutoff for counting as
* large is page size * group size.
*/
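To ground the description above, a short worked example (editorial; assumes LG_QUANTUM == 4, i.e. a 16-byte quantum, and SC_LG_TINY_MIN == 3):

/*
 * Tiny classes:      8                           (LG_QUANTUM - SC_LG_TINY_MIN == 1 class)
 * Pseudo-group:     16,  32,  48,  64            (spaced by the 16-byte quantum)
 * Regular group 0:  80,  96, 112, 128            (lg_base == 6, lg_delta == 4)
 * Regular group 1: 160, 192, 224, 256            (lg_base == 7, lg_delta == 5)
 * ...and so on, with delta doubling in each later group.
 */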
/*
* Size class N + (1 << SC_LG_NGROUP) is twice the size of size class N.
*/
#define SC_LG_NGROUP 2
#define SC_LG_TINY_MIN 3
#if SC_LG_TINY_MIN == 0
/* The div module doesn't support division by 1, which this would require. */
#error "Unsupported LG_TINY_MIN"
#endif
/*
* The definitions below are all determined by the above settings and system
* characteristics.
*/
#define SC_NGROUP (1ULL << SC_LG_NGROUP)
#define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
#define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
#define SC_NPSEUDO SC_NGROUP
#define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
/*
* We cap allocations to be less than 2 ** (ptr_bits - 1), so the highest base
* we need is 2 ** (ptr_bits - 2). (This also means that the last group is 1
* size class shorter than the others).
* We could probably save some space in arenas by capping this at LG_VADDR size.
*/
#define SC_LG_BASE_MAX (SC_PTR_BITS - 2)
#define SC_NREGULAR (SC_NGROUP * \
(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
/*
* The number of size classes that are at least a page in size. Note that
* because delta may be smaller than a page, this is not the same as the number
* of size classes that are *multiples* of the page size.
*/
#define SC_NPSIZES_MAX ( \
/* Start with all the size classes. */ \
SC_NSIZES \
/* Subtract out those groups with too small a base. */ \
- (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
/* And the pseudo-group. */ \
- SC_NPSEUDO \
/* And the tiny group. */ \
- SC_NTINY \
/* \
* In the lg_base == lg_page - 1 group, only the last sc is big \
* enough to make it to lg_page. \
*/ \
- (SC_NGROUP - 1))
/*
* We declare a size class is binnable if size < page size * group. Or, in other
* words, lg(size) < lg(page size) + lg(group size).
*/
#define SC_NBINS ( \
/* Sub-regular size classes. */ \
SC_NTINY + SC_NPSEUDO \
/* Groups with lg_regular_min_base <= lg_base <= lg_base_max */ \
+ SC_NGROUP * (LG_PAGE + SC_LG_NGROUP - SC_LG_FIRST_REGULAR_BASE) \
/* Last SC of the last group hits the bound exactly; exclude it. */ \
- 1)
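Plugging in a common 64-bit configuration (editorial; assumes LG_QUANTUM == 4, LG_PAGE == 12, SC_LG_TINY_MIN == 3, and 64-bit pointers, so SC_LG_BASE_MAX == 62):

/*
 * SC_NTINY       == 4 - 3                        == 1
 * SC_NPSEUDO     == SC_NGROUP                    == 4
 * SC_NREGULAR    == 4 * (62 - 6 + 1) - 1         == 227
 * SC_NSIZES      == 1 + 4 + 227                  == 232
 * SC_NPSIZES_MAX == 232 - 20 - 4 - 1 - 3         == 204
 * SC_NBINS       == 1 + 4 + 4 * (12 + 2 - 6) - 1 == 36
 */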
/*
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes.
*/
#if (SC_NBINS > 256)
# error "Too many small size classes"
#endif
/* The largest size class in the lookup table. */
#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
typedef struct sc_s sc_t;
struct sc_s {
/* Size class index, or -1 if not a valid size class. */
int index;
/* Lg group base size (no deltas added). */
int lg_base;
/* Lg delta to previous size class. */
int lg_delta;
/* Delta multiplier. size == 1<<lg_base + ndelta<<lg_delta */
int ndelta;
/*
* True if the size class is a multiple of the page size, false
* otherwise.
*/
bool psz;
/*
* True if the size class is a small (bin-allocated) size class, false otherwise.
*/
bool bin;
/* The slab page count if a small bin size class, 0 otherwise. */
int pgs;
/* Same as lg_delta if a lookup table size class, 0 otherwise. */
int lg_delta_lookup;
};
typedef struct sc_data_s sc_data_t;
struct sc_data_s {
/* Number of tiny size classes. */
unsigned ntiny;
/* Number of bins supported by the lookup table. */
int nlbins;
/* Number of small size class bins. */
int nbins;
/* Number of size classes. */
int nsizes;
/* Number of bits required to store NSIZES. */
int lg_ceil_nsizes;
/* Number of size classes that are a multiple of (1U << LG_PAGE). */
unsigned npsizes;
/* Lg of maximum tiny size class (or -1, if none). */
int lg_tiny_maxclass;
/* Maximum size class included in lookup table. */
size_t lookup_maxclass;
/* Maximum small size class. */
size_t small_maxclass;
/* Lg of minimum large size class. */
int lg_large_minclass;
/* The minimum large size class. */
size_t large_minclass;
/* Maximum (large) size class. */
size_t large_maxclass;
/* True if the sc_data_t has been initialized (for debugging only). */
bool initialized;
sc_t sc[SC_NSIZES];
};
extern sc_data_t sc_data_global;
void sc_data_init(sc_data_t *data);
void sc_boot();
#endif /* JEMALLOC_INTERNAL_SC_H */
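A sketch of how a consumer might drive the new interface (editorial; only sc_data_init and the struct fields above are taken from this header, and the wrapper function is hypothetical):

#include "jemalloc/internal/sc.h"

static void
sc_example(void) {
	sc_data_t data;
	sc_data_init(&data);	/* compute the size class table at runtime */

	/* Every entry satisfies size == (1 << lg_base) + ndelta * (1 << lg_delta). */
	for (int i = 0; i < data.nsizes; i++) {
		sc_t *sc = &data.sc[i];
		size_t size = ((size_t)1 << sc->lg_base)
		    + ((size_t)sc->ndelta << sc->lg_delta);
		(void)size;
	}

	/* Former compile-time constants are now fields: */
	size_t small_max = data.small_maxclass;	/* was SMALL_MAXCLASS */
	size_t large_max = data.large_maxclass;	/* was LARGE_MAXCLASS */
	(void)small_max;
	(void)large_max;
}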


@ -3,7 +3,7 @@
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
/*
@ -26,18 +26,18 @@
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
*/
extern size_t const sz_pind2sz_tab[NPSIZES+1];
extern size_t sz_pind2sz_tab[SC_NPSIZES_MAX + 1];
/*
* sz_index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by sz_index2size_compute().
*/
extern size_t const sz_index2size_tab[NSIZES];
extern size_t sz_index2size_tab[SC_NSIZES];
/*
* sz_size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via sz_size2index().
*/
extern uint8_t const sz_size2index_tab[];
extern uint8_t sz_size2index_tab[];
static const size_t sz_large_pad =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
@ -47,41 +47,40 @@ static const size_t sz_large_pad =
#endif
;
extern void sz_boot(const sc_data_t *sc_data);
JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
if (unlikely(psz > LARGE_MAXCLASS)) {
return NPSIZES;
if (unlikely(psz > sc_data_global.large_maxclass)) {
return sc_data_global.npsizes;
}
{
pszind_t x = lg_floor((psz<<1)-1);
pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x -
(LG_SIZE_CLASS_GROUP + LG_PAGE);
pszind_t grp = shift << LG_SIZE_CLASS_GROUP;
pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
0 : x - (SC_LG_NGROUP + LG_PAGE);
pszind_t grp = shift << SC_LG_NGROUP;
pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
LG_PAGE : x - SC_LG_NGROUP - 1;
size_t delta_inverse_mask = ZU(-1) << lg_delta;
pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
((ZU(1) << SC_LG_NGROUP) - 1);
pszind_t ind = grp + mod;
return ind;
}
}
static inline size_t
sz_pind2sz_compute(pszind_t pind) {
if (unlikely(pind == NPSIZES)) {
return LARGE_MAXCLASS + PAGE;
if (unlikely(pind == sc_data_global.npsizes)) {
return sc_data_global.large_maxclass + PAGE;
}
{
size_t grp = pind >> LG_SIZE_CLASS_GROUP;
size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
size_t grp = pind >> SC_LG_NGROUP;
size_t mod = pind & ((ZU(1) << SC_LG_NGROUP) - 1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_PAGE +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
size_t grp_size = ((ZU(1) << (LG_PAGE + (SC_LG_NGROUP-1))) << grp)
& grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_PAGE-1);
@ -90,7 +89,6 @@ sz_pind2sz_compute(pszind_t pind) {
size_t sz = grp_size + mod_size;
return sz;
}
}
static inline size_t
sz_pind2sz_lookup(pszind_t pind) {
@ -101,70 +99,67 @@ sz_pind2sz_lookup(pszind_t pind) {
static inline size_t
sz_pind2sz(pszind_t pind) {
assert(pind < NPSIZES+1);
assert(pind < sc_data_global.npsizes + 1);
return sz_pind2sz_lookup(pind);
}
static inline size_t
sz_psz2u(size_t psz) {
if (unlikely(psz > LARGE_MAXCLASS)) {
return LARGE_MAXCLASS + PAGE;
if (unlikely(psz > sc_data_global.large_maxclass)) {
return sc_data_global.large_maxclass + PAGE;
}
{
size_t x = lg_floor((psz<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ?
LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1;
size_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
LG_PAGE : x - SC_LG_NGROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (psz + delta_mask) & ~delta_mask;
return usize;
}
}
static inline szind_t
sz_size2index_compute(size_t size) {
if (unlikely(size > LARGE_MAXCLASS)) {
return NSIZES;
if (unlikely(size > sc_data_global.large_maxclass)) {
return SC_NSIZES;
}
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
#if (SC_NTINY != 0)
if (size <= (ZU(1) << sc_data_global.lg_tiny_maxclass)) {
szind_t lg_tmin = sc_data_global.lg_tiny_maxclass
- sc_data_global.ntiny + 1;
szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
}
#endif
{
szind_t x = lg_floor((size<<1)-1);
szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
szind_t grp = shift << LG_SIZE_CLASS_GROUP;
szind_t shift = (x < SC_LG_NGROUP + LG_QUANTUM) ? 0 :
x - (SC_LG_NGROUP + LG_QUANTUM);
szind_t grp = shift << SC_LG_NGROUP;
szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
szind_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - SC_LG_NGROUP - 1;
size_t delta_inverse_mask = ZU(-1) << lg_delta;
szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
((ZU(1) << SC_LG_NGROUP) - 1);
szind_t index = NTBINS + grp + mod;
szind_t index = sc_data_global.ntiny + grp + mod;
return index;
}
}
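A worked trace of the computation above (editorial; assumes LG_QUANTUM == 4, so sc_data_global.ntiny == 1):

/*
 * size == 80:  x        == lg_floor(2*80 - 1) == 7
 *              shift    == 7 - (SC_LG_NGROUP + LG_QUANTUM) == 1
 *              grp      == 1 << SC_LG_NGROUP == 4
 *              lg_delta == 7 - SC_LG_NGROUP - 1 == 4
 *              mod      == (((80 - 1) & ~0xf) >> 4) & 3 == 0
 *              index    == ntiny + grp + mod == 1 + 4 + 0 == 5
 * i.e. 80 bytes maps to the sixth size class: 8, 16, 32, 48, 64, 80.
 */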
JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index_lookup(size_t size) {
assert(size <= LOOKUP_MAXCLASS);
{
szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]);
assert(size <= SC_LOOKUP_MAXCLASS);
szind_t ret = (sz_size2index_tab[(size-1) >> SC_LG_TINY_MIN]);
assert(ret == sz_size2index_compute(size));
return ret;
}
}
JEMALLOC_ALWAYS_INLINE szind_t
sz_size2index(size_t size) {
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS)) {
if (likely(size <= SC_LOOKUP_MAXCLASS)) {
return sz_size2index_lookup(size);
}
return sz_size2index_compute(size);
@ -172,20 +167,21 @@ sz_size2index(size_t size) {
static inline size_t
sz_index2size_compute(szind_t index) {
#if (NTBINS > 0)
if (index < NTBINS) {
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#if (SC_NTINY > 0)
if (index < sc_data_global.ntiny) {
return (ZU(1) << (sc_data_global.lg_tiny_maxclass
- sc_data_global.ntiny + 1 + index));
}
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
size_t reduced_index = index - sc_data_global.ntiny;
size_t grp = reduced_index >> SC_LG_NGROUP;
size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
1);
size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
(SC_LG_NGROUP-1))) << grp) & grp_size_mask;
size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
@ -205,18 +201,19 @@ sz_index2size_lookup(szind_t index) {
JEMALLOC_ALWAYS_INLINE size_t
sz_index2size(szind_t index) {
assert(index < NSIZES);
assert(index < SC_NSIZES);
return sz_index2size_lookup(index);
}
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
if (unlikely(size > LARGE_MAXCLASS)) {
if (unlikely(size > sc_data_global.large_maxclass)) {
return 0;
}
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
#if (SC_NTINY > 0)
if (size <= (ZU(1) << sc_data_global.lg_tiny_maxclass)) {
size_t lg_tmin = sc_data_global.lg_tiny_maxclass
- sc_data_global.ntiny + 1;
size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
@ -224,8 +221,8 @@ sz_s2u_compute(size_t size) {
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t lg_delta = (x < SC_LG_NGROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - SC_LG_NGROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
@ -248,7 +245,7 @@ sz_s2u_lookup(size_t size) {
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u(size_t size) {
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS)) {
if (likely(size <= SC_LOOKUP_MAXCLASS)) {
return sz_s2u_lookup(size);
}
return sz_s2u_compute(size);
@ -265,7 +262,7 @@ sz_sa2u(size_t size, size_t alignment) {
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
if (size <= SMALL_MAXCLASS && alignment < PAGE) {
if (size <= sc_data_global.small_maxclass && alignment < PAGE) {
/*
* Round size up to the nearest multiple of alignment.
*
@ -281,20 +278,20 @@ sz_sa2u(size_t size, size_t alignment) {
* 192 | 11000000 | 64
*/
usize = sz_s2u(ALIGNMENT_CEILING(size, alignment));
if (usize < LARGE_MINCLASS) {
if (usize < sc_data_global.large_minclass) {
return usize;
}
}
/* Large size class. Beware of overflow. */
if (unlikely(alignment > LARGE_MAXCLASS)) {
if (unlikely(alignment > sc_data_global.large_maxclass)) {
return 0;
}
/* Make sure result is a large size class. */
if (size <= LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
if (size <= sc_data_global.large_minclass) {
usize = sc_data_global.large_minclass;
} else {
usize = sz_s2u(size);
if (usize < size) {


@ -1,15 +1,13 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#include "jemalloc/internal/size_classes.h"
extern bool opt_tcache;
extern ssize_t opt_lg_tcache_max;
extern cache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
* large-object bins.
*/
extern unsigned nhbins;


@ -3,7 +3,7 @@
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
@ -46,7 +46,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
bool tcache_success;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
assert(binind < NBINS);
assert(binind < SC_NBINS);
bin = tcache_small_bin_get(tcache, binind);
ret = cache_bin_alloc_easy(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
@ -107,7 +107,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
cache_bin_t *bin;
bool tcache_success;
assert(binind >= NBINS && binind < nhbins);
assert(binind >= SC_NBINS && binind < nhbins);
bin = tcache_large_bin_get(tcache, binind);
ret = cache_bin_alloc_easy(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
@ -166,7 +166,8 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
cache_bin_t *bin;
cache_bin_info_t *bin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
<= sc_data_global.small_maxclass);
if (slow_path && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, &bin_infos[binind]);
@ -191,7 +192,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
cache_bin_t *bin;
cache_bin_info_t *bin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
> sc_data_global.small_maxclass);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
if (slow_path && config_fill && unlikely(opt_junk_free)) {


@ -1,9 +1,9 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/cache_bin.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/ticker.h"
/* Various uses of this struct need it to be a named type. */
@ -25,7 +25,7 @@ struct tcache_s {
* During tcache initialization, the avail pointer in each element of
* tbins is initialized to point to the proper offset within this array.
*/
cache_bin_t bins_small[NBINS];
cache_bin_t bins_small[SC_NBINS];
/*
* This data is less hot; we can be a little less careful with our
@ -50,13 +50,13 @@ struct tcache_s {
/* Next bin to GC. */
szind_t next_gc_bin;
/* For small bins, fill (ncached_max >> lg_fill_div). */
uint8_t lg_fill_div[NBINS];
uint8_t lg_fill_div[SC_NBINS];
/*
* We put the cache bins for large size classes at the end of the
* struct, since some of them might not get used. This might end up
* letting us avoid touching an extra page if we don't have to.
*/
cache_bin_t bins_large[NSIZES-NBINS];
cache_bin_t bins_large[SC_NSIZES-SC_NBINS];
};
/* Linkage for list of available (previously used) explicit tcache IDs. */


@ -1,7 +1,7 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
@ -45,7 +45,7 @@ typedef struct tcaches_s tcaches_t;
/* Number of tcache allocation/deallocation events between incremental GCs. */
#define TCACHE_GC_INCR \
((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
/* Used in TSD static initializer only. Real init in tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}


@ -8,7 +8,6 @@
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
@ -42,7 +41,7 @@ const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#undef STEP
};
static div_info_t arena_binind_div_info[NBINS];
static div_info_t arena_binind_div_info[SC_NBINS];
size_t opt_huge_threshold = HUGE_THRESHOLD_DEFAULT;
size_t huge_threshold = HUGE_THRESHOLD_DEFAULT;
@ -128,7 +127,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
extents_npages_get(&arena->extents_dirty) +
extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
for (szind_t i = 0; i < NSIZES - NBINS; i++) {
for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
&arena->stats.lstats[i].nmalloc);
arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
@ -151,7 +150,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents;
arena_stats_accum_zu(&astats->allocated_large,
curlextents * sz_index2size(NBINS + i));
curlextents * sz_index2size(SC_NBINS + i));
}
arena_stats_unlock(tsdn, &arena->stats);
@ -162,7 +161,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
szind_t i = 0;
for (; i < NBINS; i++) {
for (; i < SC_NBINS; i++) {
cache_bin_t *tbin = &descriptor->bins_small[i];
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * sz_index2size(i));
@ -206,7 +205,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
nstime_update(&astats->uptime);
nstime_subtract(&astats->uptime, &arena->create_time);
for (szind_t i = 0; i < NBINS; i++) {
for (szind_t i = 0; i < SC_NBINS; i++) {
bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
}
}
@ -297,11 +296,11 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
cassert(config_stats);
if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
if (usize < sc_data_global.large_minclass) {
usize = sc_data_global.large_minclass;
}
index = sz_size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
arena_stats_add_u64(tsdn, &arena->stats,
&arena->stats.lstats[hindex].nmalloc, 1);
@ -313,11 +312,11 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
cassert(config_stats);
if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
if (usize < sc_data_global.large_minclass) {
usize = sc_data_global.large_minclass;
}
index = sz_size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
arena_stats_add_u64(tsdn, &arena->stats,
&arena->stats.lstats[hindex].ndalloc, 1);
@ -994,7 +993,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
assert(alloc_ctx.szind != SC_NSIZES);
if (config_stats || (config_prof && opt_prof)) {
usize = sz_index2size(alloc_ctx.szind);
@ -1010,7 +1009,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
/* Bins. */
for (unsigned i = 0; i < NBINS; i++) {
for (unsigned i = 0; i < SC_NBINS; i++) {
extent_t *slab;
bin_t *bin = &arena->bins[i];
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
@ -1331,7 +1330,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
size_t usize;
extent_t *slab;
assert(binind < NBINS);
assert(binind < SC_NBINS);
bin = &arena->bins[binind];
usize = sz_index2size(binind);
@ -1390,7 +1389,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
return NULL;
}
if (likely(size <= SMALL_MAXCLASS)) {
if (likely(size <= sc_data_global.small_maxclass)) {
return arena_malloc_small(tsdn, arena, ind, zero);
}
return large_malloc(tsdn, arena, sz_index2size(ind), zero);
@ -1401,8 +1400,9 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache) {
void *ret;
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
if (usize <= sc_data_global.small_maxclass
&& (alignment < PAGE
|| (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special slab placement. */
ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true);
@ -1420,8 +1420,8 @@ void
arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
cassert(config_prof);
assert(ptr != NULL);
assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
assert(usize <= SMALL_MAXCLASS);
assert(isalloc(tsdn, ptr) == sc_data_global.large_minclass);
assert(usize <= sc_data_global.small_maxclass);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
@ -1445,15 +1445,15 @@ arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
extent_szind_set(extent, NBINS);
extent_szind_set(extent, SC_NBINS);
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
NBINS, false);
SC_NBINS, false);
assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
assert(isalloc(tsdn, ptr) == sc_data_global.large_minclass);
return LARGE_MINCLASS;
return sc_data_global.large_minclass;
}
void
@ -1594,33 +1594,35 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize) {
bool ret;
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
assert(extra == 0 || size + extra <= sc_data_global.large_maxclass);
extent_t *extent = iealloc(tsdn, ptr);
if (unlikely(size > LARGE_MAXCLASS)) {
if (unlikely(size > sc_data_global.large_maxclass)) {
ret = true;
goto done;
}
size_t usize_min = sz_s2u(size);
size_t usize_max = sz_s2u(size + extra);
if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
if (likely(oldsize <= sc_data_global.small_maxclass && usize_min
<= sc_data_global.small_maxclass)) {
/*
* Avoid moving the allocation if the size class can be left the
* same.
*/
assert(bin_infos[sz_size2index(oldsize)].reg_size ==
oldsize);
if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
sz_size2index(oldsize)) && (size > oldsize || usize_max <
oldsize)) {
if ((usize_max > sc_data_global.small_maxclass
|| sz_size2index(usize_max) != sz_size2index(oldsize))
&& (size > oldsize || usize_max < oldsize)) {
ret = true;
goto done;
}
arena_decay_tick(tsdn, extent_arena_get(extent));
ret = false;
} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
} else if (oldsize >= sc_data_global.large_minclass
&& usize_max >= sc_data_global.large_minclass) {
ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
zero);
} else {
@ -1641,7 +1643,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
zero, tcache, true);
}
usize = sz_sa2u(usize, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
return NULL;
}
return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
@ -1652,11 +1654,11 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
size_t usize = sz_s2u(size);
if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
if (unlikely(usize == 0 || size > sc_data_global.large_maxclass)) {
return NULL;
}
if (likely(usize <= SMALL_MAXCLASS)) {
if (likely(usize <= sc_data_global.small_maxclass)) {
/* Try to avoid moving the allocation. */
UNUSED size_t newsize;
if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
@ -1669,7 +1671,8 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
}
}
if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
if (oldsize >= sc_data_global.large_minclass
&& usize >= sc_data_global.large_minclass) {
return large_ralloc(tsdn, arena, ptr, usize,
alignment, zero, tcache, hook_args);
}
@ -1751,8 +1754,8 @@ arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
if (new_limit != NULL) {
size_t limit = *new_limit;
/* Grow no more than the new limit. */
if ((new_ind = sz_psz2ind(limit + 1) - 1) >
EXTENT_GROW_MAX_PIND) {
if ((new_ind = sz_psz2ind(limit + 1) - 1)
>= sc_data_global.npsizes) {
return true;
}
}
@ -1896,7 +1899,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
arena->retain_grow_limit = sc_data_global.npsizes - 1;
if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
goto label_error;
@ -1909,7 +1912,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
bool err = bin_init(&arena->bins[i]);
if (err) {
goto label_error;
@ -1982,10 +1985,10 @@ arena_init_huge(void) {
bool huge_enabled;
/* The threshold should be large size class. */
if (opt_huge_threshold > LARGE_MAXCLASS ||
opt_huge_threshold < LARGE_MINCLASS) {
if (opt_huge_threshold > sc_data_global.large_maxclass ||
opt_huge_threshold < sc_data_global.large_minclass) {
opt_huge_threshold = 0;
huge_threshold = LARGE_MAXCLASS + PAGE;
huge_threshold = sc_data_global.large_maxclass + PAGE;
huge_enabled = false;
} else {
/* Reserve the index for the huge arena. */
@ -2001,16 +2004,11 @@ void
arena_boot(void) {
arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size) \
div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
for (unsigned i = 0; i < SC_NBINS; i++) {
sc_t *sc = &sc_data_global.sc[i];
div_init(&arena_binind_div_info[i],
(1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
}
}
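Editorial note on the loop above: it recomputes per bin what the deleted REGIND_bin_yes expansion used to do, seeding each div_info entry with the bin's region size.

/*
 * E.g. an sc entry with lg_base == 6, ndelta == 1, lg_delta == 4 gives
 * (1 << 6) + (1 << 4) == 80, so arena_binind_div_info for that bin divides
 * by 80-byte regions.
 */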
void
@ -2055,7 +2053,7 @@ arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
for (unsigned i = 0; i < NBINS; i++) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_prefork(tsdn, &arena->bins[i]);
}
}
@ -2064,7 +2062,7 @@ void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
bin_postfork_parent(tsdn, &arena->bins[i]);
}
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
@ -2108,7 +2106,7 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
}
}
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
bin_postfork_child(tsdn, &arena->bins[i]);
}
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);


@ -262,8 +262,8 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
*/
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
*pind_last;
pszind_t pind_next = (*pind_last + 1 < sc_data_global.npsizes) ?
*pind_last + 1 : *pind_last;
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
@ -372,7 +372,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->extent_sn_next = extent_sn_next;
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < NSIZES; i++) {
for (szind_t i = 0; i < SC_NSIZES; i++) {
extent_heap_new(&base->avail[i]);
}
if (config_stats) {
@ -426,7 +426,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */


@ -1,23 +1,34 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"
const bin_info_t bin_infos[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
(pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
(ndelta<<lg_delta)))
SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};
bin_info_t bin_infos[SC_NBINS];
void
bin_infos_init(sc_data_t *sc_data, bin_info_t bin_infos[SC_NBINS]) {
for (unsigned i = 0; i < SC_NBINS; i++) {
bin_info_t *bin_info = &bin_infos[i];
sc_t *sc = &sc_data->sc[i];
bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ ((size_t)sc->ndelta << sc->lg_delta);
bin_info->slab_size = (sc->pgs << LG_PAGE);
bin_info->nregs =
(uint32_t)(bin_info->slab_size / bin_info->reg_size);
bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
bin_info->nregs);
bin_info->bitmap_info = bitmap_info;
}
}
void
bin_boot(sc_data_t *sc_data) {
assert(sc_data->initialized);
bin_infos_init(sc_data, bin_infos);
}
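A minimal sanity sketch of the invariant that bin_infos_init() establishes (hypothetical helper, assuming the usual jemalloc internal headers are available; not part of the commit):

static void
bin_infos_check_sketch(void) {
	for (unsigned i = 0; i < SC_NBINS; i++) {
		/* nregs was derived as slab_size / reg_size above. */
		assert(bin_infos[i].nregs
		    == bin_infos[i].slab_size / bin_infos[i].reg_size);
	}
}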
bool
bin_init(bin_t *bin) {

View File

@ -275,7 +275,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
lg_curcells++;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0
|| usize > sc_data_global.large_maxclass)) {
ret = true;
goto label_return;
}
@ -320,7 +321,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
return;
}
tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
@ -396,7 +397,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->keycomp = keycomp;
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0 || usize > sc_data_global.large_maxclass)) {
ret = true;
goto label_return;
}

View File

@ -8,7 +8,7 @@
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
@ -710,9 +710,9 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nmalloc_small = 0;
ctl_arena->astats->ndalloc_small = 0;
ctl_arena->astats->nrequests_small = 0;
memset(ctl_arena->astats->bstats, 0, NBINS *
memset(ctl_arena->astats->bstats, 0, SC_NBINS *
sizeof(bin_stats_t));
memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t));
}
}
@ -729,7 +729,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->astats->astats, ctl_arena->astats->bstats,
ctl_arena->astats->lstats);
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
ctl_arena->astats->allocated_small +=
ctl_arena->astats->bstats[i].curregs *
sz_index2size(i);
@ -841,7 +841,7 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->astats.uptime = astats->astats.uptime;
}
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sdstats->bstats[i].nrequests +=
@ -867,7 +867,7 @@ MUTEX_PROF_ARENA_MUTEXES
&astats->bstats[i].mutex_data);
}
for (i = 0; i < NSIZES - NBINS; i++) {
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc);
ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
@ -2433,7 +2433,7 @@ arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
@ -2441,19 +2441,19 @@ CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t i) {
if (i > NBINS) {
if (i > SC_NBINS) {
return NULL;
}
return super_arenas_bin_i_node;
}
CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t i) {
if (i > NSIZES - NBINS) {
if (i > SC_NSIZES - SC_NBINS) {
return NULL;
}
return super_arenas_lextent_i_node;
@ -2818,7 +2818,7 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
for (szind_t i = 0; i < NBINS; i++) {
for (szind_t i = 0; i < SC_NBINS; i++) {
bin_t *bin = &arena->bins[i];
MUTEX_PROF_RESET(bin->lock);
}
@ -2849,7 +2849,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t j) {
if (j > NBINS) {
if (j > SC_NBINS) {
return NULL;
}
return super_stats_arenas_i_bins_j_node;
@ -2870,7 +2870,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
static const ctl_named_node_t *
stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
size_t miblen, size_t j) {
if (j > NSIZES - NBINS) {
if (j > SC_NSIZES - SC_NBINS) {
return NULL;
}
return super_stats_arenas_i_lextents_j_node;

View File

@ -20,7 +20,7 @@ mutex_pool_t extent_mutex_pool;
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
static const bitmap_info_t extents_bitmap_info =
BITMAP_INFO_INITIALIZER(NPSIZES+1);
BITMAP_INFO_INITIALIZER(SC_NPSIZES_MAX+1);
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
size_t size, size_t alignment, bool *zero, bool *commit,
@ -259,7 +259,7 @@ extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
assert(size - sz_large_pad <= LARGE_MAXCLASS);
assert(size - sz_large_pad <= sc_data_global.large_maxclass);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
@ -288,7 +288,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
malloc_mutex_rank_exclusive)) {
return true;
}
for (unsigned i = 0; i < NPSIZES+1; i++) {
for (unsigned i = 0; i < sc_data_global.npsizes + 1; i++) {
extent_heap_new(&extents->heaps[i]);
}
bitmap_init(extents->bitmap, &extents_bitmap_info, true);
@ -375,7 +375,7 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
&extents_bitmap_info, (size_t)pind); i < pind_max; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(i < NPSIZES);
assert(i < sc_data_global.npsizes);
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
uintptr_t base = (uintptr_t)extent_base_get(extent);
@ -405,7 +405,7 @@ extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
if (i < sc_data_global.npsizes + 1) {
/*
* In order to reduce fragmentation, avoid reusing and splitting
* large extents for much smaller sizes.
@ -433,8 +433,9 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
&extents_bitmap_info, (size_t)pind);
i < sc_data_global.npsizes + 1;
i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)i+1)) {
assert(!extent_heap_empty(&extents->heaps[i]));
extent_t *extent = extent_heap_first(&extents->heaps[i]);
@ -442,10 +443,10 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
ret = extent;
}
if (i == NPSIZES) {
if (i == sc_data_global.npsizes) {
break;
}
assert(i < NPSIZES);
assert(i < sc_data_global.npsizes);
}
return ret;
@ -821,7 +822,7 @@ extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
extent_lock(tsdn, extent);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
@ -962,7 +963,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
if (leadsize != 0) {
*lead = *extent;
*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
*lead, leadsize, NSIZES, false, esize + trailsize, szind,
*lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
slab, growing_retained);
if (*extent == NULL) {
*to_leak = *lead;
@ -974,7 +975,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
esize, szind, slab, trailsize, NSIZES, false,
esize, szind, slab, trailsize, SC_NSIZES, false,
growing_retained);
if (*trail == NULL) {
*to_leak = *extent;
@ -991,7 +992,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
* splitting occurred.
*/
extent_szind_set(*extent, szind);
if (szind != NSIZES) {
if (szind != SC_NSIZES) {
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(*extent), szind, slab);
if (slab && extent_size_get(*extent) > PAGE) {
@ -1248,11 +1249,13 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
while (alloc_size < alloc_size_min) {
egn_skip++;
if (arena->extent_grow_next + egn_skip == NPSIZES) {
if (arena->extent_grow_next + egn_skip ==
sc_data_global.npsizes) {
/* Outside legal range. */
goto label_err;
}
assert(arena->extent_grow_next + egn_skip < NPSIZES);
assert(arena->extent_grow_next + egn_skip
< sc_data_global.npsizes);
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
}
@ -1275,7 +1278,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
extent_hook_post_reentrancy(tsdn);
}
extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
arena_extent_sn_next(arena), extent_state_active, zeroed,
committed, true);
if (ptr == NULL) {
@ -1610,7 +1613,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
malloc_mutex_lock(tsdn, &extents->mtx);
extent_hooks_assure_initialized(arena, r_extent_hooks);
extent_szind_set(extent, NSIZES);
extent_szind_set(extent, SC_NSIZES);
if (extent_slab_get(extent)) {
extent_interior_deregister(tsdn, rtree_ctx, extent);
extent_slab_set(extent, false);
@ -1622,7 +1625,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
if (!extents->delay_coalesce) {
extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
rtree_ctx, extents, extent, NULL, growing_retained);
} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
} else if (extent_size_get(extent) >= sc_data_global.large_minclass) {
/* Always coalesce large extents eagerly. */
bool coalesced;
size_t prev_size;
@ -1633,7 +1636,8 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
r_extent_hooks, rtree_ctx, extents, extent,
&coalesced, growing_retained);
} while (coalesced &&
extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
extent_size_get(extent)
>= prev_size + sc_data_global.large_minclass);
}
extent_deactivate_locked(tsdn, arena, extents, extent);
@ -2132,22 +2136,23 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
if (a_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
NSIZES, false);
SC_NSIZES, false);
}
if (b_elm_b != NULL) {
rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
NSIZES, false);
SC_NSIZES, false);
} else {
b_elm_b = b_elm_a;
}
extent_size_set(a, extent_size_get(a) + extent_size_get(b));
extent_szind_set(a, NSIZES);
extent_szind_set(a, SC_NSIZES);
extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
extent_sn_get(a) : extent_sn_get(b));
extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
false);
extent_unlock2(tsdn, a, b);

View File

@ -154,7 +154,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
extent_init(gap, arena, gap_addr_page,
gap_size_page, false, NSIZES,
gap_size_page, false, SC_NSIZES,
arena_extent_sn_next(arena),
extent_state_active, false, true, true);
}
@ -198,7 +198,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_t extent;
extent_init(&extent, arena, ret, size,
size, false, NSIZES,
size, false, SC_NSIZES,
extent_state_active, false, true,
true);
if (extent_purge_forced_wrapper(tsdn,

View File

@ -13,7 +13,7 @@
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
@ -1158,7 +1158,8 @@ malloc_conf_init(void) {
/* Experimental feature. Will be documented later.*/
CONF_HANDLE_SIZE_T(opt_huge_threshold,
"experimental_huge_threshold",
LARGE_MINCLASS, LARGE_MAXCLASS, yes, yes, false)
sc_data_global.large_minclass,
sc_data_global.large_maxclass, yes, yes, false)
CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
"lg_extent_max_active_fit", 0,
(sizeof(size_t) << 3), yes, yes, false)
@ -1294,6 +1295,10 @@ static bool
malloc_init_hard_a0_locked() {
malloc_initializer = INITIALIZER;
sc_boot();
sz_boot(&sc_data_global);
bin_boot(&sc_data_global);
if (config_prof) {
prof_boot0();
}
@ -1747,12 +1752,13 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
szind_t ind_large;
size_t bumped_usize = usize;
if (usize <= SMALL_MAXCLASS) {
assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
sz_sa2u(LARGE_MINCLASS, dopts->alignment))
== LARGE_MINCLASS);
ind_large = sz_size2index(LARGE_MINCLASS);
bumped_usize = sz_s2u(LARGE_MINCLASS);
if (usize <= sc_data_global.small_maxclass) {
assert(((dopts->alignment == 0) ?
sz_s2u(sc_data_global.large_minclass) :
sz_sa2u(sc_data_global.large_minclass, dopts->alignment))
== sc_data_global.large_minclass);
ind_large = sz_size2index(sc_data_global.large_minclass);
bumped_usize = sz_s2u(sc_data_global.large_minclass);
ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
bumped_usize, ind_large);
if (unlikely(ret == NULL)) {
@ -1855,16 +1861,18 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
if (dopts->alignment == 0) {
ind = sz_size2index(size);
if (unlikely(ind >= NSIZES)) {
if (unlikely(ind >= SC_NSIZES)) {
goto label_oom;
}
if (config_stats || (config_prof && opt_prof)) {
usize = sz_index2size(ind);
assert(usize > 0 && usize <= LARGE_MAXCLASS);
assert(usize > 0 && usize
<= sc_data_global.large_maxclass);
}
} else {
usize = sz_sa2u(size, dopts->alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0
|| usize > sc_data_global.large_maxclass)) {
goto label_oom;
}
}
@ -1900,7 +1908,8 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
alloc_ctx_t alloc_ctx;
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
alloc_ctx.slab = (usize
<= sc_data_global.small_maxclass);
allocation = imalloc_no_sample(
sopts, dopts, tsd, usize, usize, ind);
} else if ((uintptr_t)tctx > (uintptr_t)1U) {
@ -2198,9 +2207,9 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
if (tctx == NULL) {
return NULL;
}
if (usize <= SMALL_MAXCLASS) {
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false,
hook_args);
if (usize <= sc_data_global.small_maxclass) {
p = iralloc(tsd, old_ptr, old_usize,
sc_data_global.large_minclass, 0, false, hook_args);
if (p == NULL) {
return NULL;
}
@ -2257,7 +2266,7 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
assert(alloc_ctx.szind != SC_NSIZES);
size_t usize;
if (config_prof && opt_prof) {
@ -2384,12 +2393,13 @@ je_realloc(void *ptr, size_t arg_size) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
usize = sz_s2u(size);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0
|| usize > sc_data_global.large_maxclass)) {
ret = NULL;
} else {
ret = irealloc_prof(tsd, ptr, old_usize, usize,
@ -2702,9 +2712,10 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
if (tctx == NULL) {
return NULL;
}
if (usize <= SMALL_MAXCLASS) {
p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
alignment, zero, tcache, arena, hook_args);
if (usize <= sc_data_global.small_maxclass) {
p = iralloct(tsdn, old_ptr, old_usize,
sc_data_global.large_minclass, alignment, zero, tcache,
arena, hook_args);
if (p == NULL) {
return NULL;
}
@ -2804,7 +2815,7 @@ je_rallocx(void *ptr, size_t size, int flags) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
@ -2813,7 +2824,8 @@ je_rallocx(void *ptr, size_t size, int flags) {
if (config_prof && opt_prof) {
usize = (alignment == 0) ?
sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
if (unlikely(usize == 0
|| usize > sc_data_global.large_maxclass)) {
goto label_oom;
}
p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
@ -2898,17 +2910,19 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
*/
if (alignment == 0) {
usize_max = sz_s2u(size+extra);
assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
assert(usize_max > 0
&& usize_max <= sc_data_global.large_maxclass);
} else {
usize_max = sz_sa2u(size+extra, alignment);
if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
if (unlikely(usize_max == 0
|| usize_max > sc_data_global.large_maxclass)) {
/*
* usize_max is out of range, and chances are that
* allocation will fail, but use the maximum possible
* value and carry on with prof_alloc_prep(), just in
* case allocation succeeds.
*/
usize_max = LARGE_MAXCLASS;
usize_max = sc_data_global.large_maxclass;
}
}
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
@ -2951,24 +2965,24 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
/*
* The API explicitly absolves itself of protecting against (size +
* extra) numerical overflow, but we may need to clamp extra to avoid
* exceeding LARGE_MAXCLASS.
* exceeding sc_data_global.large_maxclass.
*
* Ordinarily, size limit checking is handled deeper down, but here we
* have to check as part of (size + extra) clamping, since we need the
* clamped value in the above helper functions.
*/
if (unlikely(size > LARGE_MAXCLASS)) {
if (unlikely(size > sc_data_global.large_maxclass)) {
usize = old_usize;
goto label_not_resized;
}
if (unlikely(LARGE_MAXCLASS - size < extra)) {
extra = LARGE_MAXCLASS - size;
if (unlikely(sc_data_global.large_maxclass - size < extra)) {
extra = sc_data_global.large_maxclass - size;
}
if (config_prof && opt_prof) {
@ -3155,7 +3169,7 @@ je_nallocx(size_t size, int flags) {
check_entry_exit_locking(tsdn);
usize = inallocx(tsdn, size, flags);
if (unlikely(usize > LARGE_MAXCLASS)) {
if (unlikely(usize > sc_data_global.large_maxclass)) {
LOG("core.nallocx.exit", "result: %zu", ZU(0));
return 0;
}

View File

@ -28,7 +28,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sz_sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
if (unlikely(ausize == 0 || ausize > sc_data_global.large_maxclass)) {
return NULL;
}
@ -109,7 +109,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + sz_large_pad,
sz_size2index(usize), false, diff, NSIZES, false);
sz_size2index(usize), false, diff, SC_NSIZES, false);
if (trail == NULL) {
return true;
}
@ -154,17 +154,17 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
bool new_mapping;
if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, extent_past_get(extent), trailsize, 0,
CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL
CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
|| (trail = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) {
CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
if (config_stats) {
new_mapping = false;
}
} else {
if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
extent_past_get(extent), trailsize, 0, CACHELINE, false,
NSIZES, &is_zeroed_trail, &commit)) == NULL) {
SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
return true;
}
if (config_stats) {
@ -221,9 +221,10 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
size_t oldusize = extent_usize_get(extent);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS);
assert(usize_min > 0 && usize_max <= sc_data_global.large_maxclass);
/* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS);
assert(oldusize >= sc_data_global.large_minclass
&& usize_max >= sc_data_global.large_minclass);
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
@ -277,9 +278,10 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t oldusize = extent_usize_get(extent);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= LARGE_MAXCLASS);
assert(usize > 0 && usize <= sc_data_global.large_maxclass);
/* Both allocation sizes must be large to avoid a move. */
assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS);
assert(oldusize >= sc_data_global.large_minclass
&& usize >= sc_data_global.large_minclass);
/* Try to avoid moving the allocation. */
if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {

src/sc.c (new file, 62 lines)
View File

@ -0,0 +1,62 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/size_classes.h"
sc_data_t sc_data_global;
static void
fill_sc(sc_data_t *data, int index, int lg_base, int lg_delta, int ndelta,
bool psz, bool bin, int pgs, int lg_delta_lookup) {
sc_t *sc = &data->sc[index];
sc->index = index;
sc->lg_base = lg_base;
sc->lg_delta = lg_delta;
sc->ndelta = ndelta;
sc->psz = psz;
sc->bin = bin;
sc->pgs = pgs;
sc->lg_delta_lookup = lg_delta_lookup;
}
void
sc_data_init(sc_data_t *data) {
assert(SC_NTINY == NTBINS);
assert(SC_NSIZES == NSIZES);
assert(SC_NBINS == NBINS);
assert(NPSIZES <= SC_NPSIZES_MAX);
assert(!data->initialized);
data->initialized = true;
data->ntiny = NTBINS;
data->nlbins = NLBINS;
data->nbins = NBINS;
data->nsizes = NSIZES;
data->lg_ceil_nsizes = LG_CEIL_NSIZES;
data->npsizes = NPSIZES;
#if SC_NTINY != 0
data->lg_tiny_maxclass = LG_TINY_MAXCLASS;
#else
data->lg_tiny_maxclass = -1;
#endif
data->lookup_maxclass = LOOKUP_MAXCLASS;
data->small_maxclass = SMALL_MAXCLASS;
data->lg_large_minclass = LG_LARGE_MINCLASS;
data->large_minclass = LARGE_MINCLASS;
data->large_maxclass = LARGE_MAXCLASS;
#define no 0
#define yes 1
#define SC(index, lg_base_base, lg_delta, ndelta, psz, bin, pgs, \
lg_delta_lookup) \
fill_sc(data, index, lg_base_base, lg_delta, ndelta, psz, bin, \
pgs, lg_delta_lookup);
SIZE_CLASSES
#undef no
#undef yes
#undef SC
}
void
sc_boot() {
sc_data_init(&sc_data_global);
}
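Read together with the malloc_init_hard_a0_locked() hunk earlier in this diff, the boot-time ordering this module assumes is roughly the following (sketch only; the actual call site is shown above):

sc_boot();                 /* Populate sc_data_global from SIZE_CLASSES.    */
sz_boot(&sc_data_global);  /* Build sz_pind2sz_tab, sz_index2size_tab, and
                            * sz_size2index_tab from that data (see sz.c).  */
bin_boot(&sc_data_global); /* Derive bin_infos[] for the small bins.        */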

src/sz.c (152 lines changed)
View File

@ -2,106 +2,60 @@
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
const size_t sz_pind2sz_tab[NPSIZES+1] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
PSZ_##psz(lg_grp, ndelta, lg_delta)
SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
(LARGE_MAXCLASS + PAGE)
};
size_t sz_pind2sz_tab[SC_NPSIZES_MAX+1];
static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
int pind = 0;
for (unsigned i = 0; i < SC_NSIZES; i++) {
const sc_t *sc = &sc_data->sc[i];
if (sc->psz) {
sz_pind2sz_tab[pind] = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
pind++;
}
}
sz_pind2sz_tab[pind] = sc_data->large_maxclass + PAGE;
}
JEMALLOC_ALIGNED(CACHELINE)
const size_t sz_index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
};
size_t sz_index2size_tab[SC_NSIZES];
static void
sz_boot_index2size_tab(const sc_data_t *sc_data) {
for (unsigned i = 0; i < SC_NSIZES; i++) {
const sc_t *sc = &sc_data->sc[i];
sz_index2size_tab[i] = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << (sc->lg_delta));
}
}
/*
* To keep this table small, we divide sizes by the tiny min size, which gives
* the smallest interval for which the result can change.
*/
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t sz_size2index_tab[] = {
#if LG_TINY_MIN == 0
/* The div module doesn't support division by 1. */
#error "Unsupported LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
uint8_t sz_size2index_tab[SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN];
static void
sz_boot_size2index_tab(const sc_data_t *sc_data) {
size_t dst_max = (SC_LOOKUP_MAXCLASS >> SC_LG_TINY_MIN);
size_t dst_ind = 0;
for (unsigned sc_ind = 0; sc_ind < SC_NSIZES && dst_ind < dst_max;
sc_ind++) {
const sc_t *sc = &sc_data->sc[sc_ind];
size_t sz = (ZU(1) << sc->lg_base)
+ (ZU(sc->ndelta) << sc->lg_delta);
size_t max_ind = ((sz - 1) >> SC_LG_TINY_MIN);
for (; dst_ind <= max_ind && dst_ind < dst_max; dst_ind++) {
sz_size2index_tab[dst_ind] = sc_ind;
}
}
}
void
sz_boot(const sc_data_t *sc_data) {
sz_boot_pind2sz_tab(sc_data);
sz_boot_index2size_tab(sc_data);
sz_boot_size2index_tab(sc_data);
}
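For reference, a hypothetical lookup against the table filled by sz_boot_size2index_tab() above; the real fast path lives in sz.h, so treat this only as a sketch of the indexing convention implied by the fill loop, i.e. (size - 1) >> SC_LG_TINY_MIN:

static inline szind_t
sz_size2index_lookup_sketch(size_t size) {
	assert(size > 0 && size <= SC_LOOKUP_MAXCLASS);
	return (szind_t)sz_size2index_tab[(size - 1) >> SC_LG_TINY_MIN];
}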

View File

@ -4,7 +4,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/sc.h"
/******************************************************************************/
/* Data. */
@ -41,7 +41,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
szind_t binind = tcache->next_gc_bin;
cache_bin_t *tbin;
if (binind < NBINS) {
if (binind < SC_NBINS) {
tbin = tcache_small_bin_get(tcache, binind);
} else {
tbin = tcache_large_bin_get(tcache, binind);
@ -50,7 +50,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
if (binind < SC_NBINS) {
tcache_bin_flush_small(tsd, tcache, tbin, binind,
tbin->ncached - tbin->low_water + (tbin->low_water
>> 2));
@ -72,7 +72,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
* Increase fill count by 2X for small bins. Make sure
* lg_fill_div stays greater than 0.
*/
if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
tcache->lg_fill_div[binind]--;
}
}
@ -105,7 +105,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem) {
bool merged_stats = false;
assert(binind < NBINS);
assert(binind < SC_NBINS);
assert((cache_bin_sz_t)rem <= tbin->ncached);
arena_t *arena = tcache->arena;
@ -369,10 +369,10 @@ tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
size_t stack_offset = 0;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
memset(tcache->bins_small, 0, sizeof(cache_bin_t) * NBINS);
memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - NBINS));
memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
unsigned i = 0;
for (; i < NBINS; i++) {
for (; i < SC_NBINS; i++) {
tcache->lg_fill_div[i] = 1;
stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
/*
@ -464,7 +464,7 @@ static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
assert(tcache->arena != NULL);
for (unsigned i = 0; i < NBINS; i++) {
for (unsigned i = 0; i < SC_NBINS; i++) {
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
@ -472,7 +472,7 @@ tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
assert(tbin->tstats.nrequests == 0);
}
}
for (unsigned i = NBINS; i < nhbins; i++) {
for (unsigned i = SC_NBINS; i < nhbins; i++) {
cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
@ -538,7 +538,7 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
cassert(config_stats);
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
bin_t *bin = &arena->bins[i];
cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
malloc_mutex_lock(tsdn, &bin->lock);
@ -658,8 +658,8 @@ bool
tcache_boot(tsdn_t *tsdn) {
/* If necessary, clamp opt_lg_tcache_max. */
if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
SMALL_MAXCLASS) {
tcache_maxclass = SMALL_MAXCLASS;
sc_data_global.small_maxclass) {
tcache_maxclass = sc_data_global.small_maxclass;
} else {
tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
}
@ -679,7 +679,7 @@ tcache_boot(tsdn_t *tsdn) {
}
stack_nelms = 0;
unsigned i;
for (i = 0; i < NBINS; i++) {
for (i = 0; i < SC_NBINS; i++) {
if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
tcache_bin_info[i].ncached_max =
TCACHE_NSLOTS_SMALL_MIN;

View File

@ -77,7 +77,7 @@ vsalloc(tsdn_t *tsdn, const void *ptr) {
return 0;
}
if (szind == NSIZES) {
if (szind == SC_NSIZES) {
return 0;
}

View File

@ -123,13 +123,14 @@ test_junk(size_t sz_min, size_t sz_max) {
TEST_BEGIN(test_junk_small) {
test_skip_if(!config_fill);
test_junk(1, SMALL_MAXCLASS-1);
test_junk(1, sc_data_global.small_maxclass - 1);
}
TEST_END
TEST_BEGIN(test_junk_large) {
test_skip_if(!config_fill);
test_junk(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
test_junk(sc_data_global.small_maxclass + 1,
(1U << (sc_data_global.lg_large_minclass + 1)));
}
TEST_END

View File

@ -581,7 +581,7 @@ TEST_BEGIN(test_arena_i_retain_grow_limit) {
assert_d_eq(mallctlbymib(mib, miblen, &default_limit, &sz, NULL, 0), 0,
"Unexpected mallctl() failure");
assert_zu_eq(default_limit, sz_pind2sz(EXTENT_GROW_MAX_PIND),
assert_zu_eq(default_limit, sz_pind2sz(sc_data_global.npsizes - 1),
"Unexpected default for retain_grow_limit");
new_limit = PAGE - 1;
@ -686,8 +686,8 @@ TEST_BEGIN(test_arenas_constants) {
TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM);
TEST_ARENAS_CONSTANT(size_t, page, PAGE);
TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS);
TEST_ARENAS_CONSTANT(unsigned, nlextents, NSIZES - NBINS);
TEST_ARENAS_CONSTANT(unsigned, nbins, SC_NBINS);
TEST_ARENAS_CONSTANT(unsigned, nlextents, SC_NSIZES - SC_NBINS);
#undef TEST_ARENAS_CONSTANT
}
@ -720,7 +720,8 @@ TEST_BEGIN(test_arenas_lextent_constants) {
assert_zu_eq(name, expected, "Incorrect "#name" size"); \
} while (0)
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size, LARGE_MINCLASS);
TEST_ARENAS_LEXTENT_CONSTANT(size_t, size,
sc_data_global.large_minclass);
#undef TEST_ARENAS_LEXTENT_CONSTANT
}

View File

@ -29,12 +29,12 @@ TEST_BEGIN(test_gdump) {
prof_dump_open = prof_dump_open_intercept;
did_prof_dump_open = false;
p = mallocx((1U << LG_LARGE_MINCLASS), 0);
p = mallocx((1U << sc_data_global.lg_large_minclass), 0);
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
did_prof_dump_open = false;
q = mallocx((1U << LG_LARGE_MINCLASS), 0);
q = mallocx((1U << sc_data_global.lg_large_minclass), 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");
@ -45,7 +45,7 @@ TEST_BEGIN(test_gdump) {
"Unexpected mallctl failure while disabling prof.gdump");
assert(gdump_old);
did_prof_dump_open = false;
r = mallocx((1U << LG_LARGE_MINCLASS), 0);
r = mallocx((1U << sc_data_global.lg_large_minclass), 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_false(did_prof_dump_open, "Unexpected profile dump");
@ -56,7 +56,7 @@ TEST_BEGIN(test_gdump) {
"Unexpected mallctl failure while enabling prof.gdump");
assert(!gdump_old);
did_prof_dump_open = false;
s = mallocx((1U << LG_LARGE_MINCLASS), 0);
s = mallocx((1U << sc_data_global.lg_large_minclass), 0);
assert_ptr_not_null(q, "Unexpected mallocx() failure");
assert_true(did_prof_dump_open, "Expected a profile dump");

View File

@ -85,10 +85,10 @@ TEST_END
TEST_BEGIN(test_rtree_extrema) {
extent_t extent_a, extent_b;
extent_init(&extent_a, NULL, NULL, LARGE_MINCLASS, false,
sz_size2index(LARGE_MINCLASS), 0, extent_state_active, false,
false, true);
extent_init(&extent_b, NULL, NULL, 0, false, NSIZES, 0,
extent_init(&extent_a, NULL, NULL, sc_data_global.large_minclass, false,
sz_size2index(sc_data_global.large_minclass), 0,
extent_state_active, false, false, true);
extent_init(&extent_b, NULL, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true);
tsdn_t *tsdn = tsdn_fetch();
@ -125,7 +125,7 @@ TEST_BEGIN(test_rtree_bits) {
PAGE + (((uintptr_t)1) << LG_PAGE) - 1};
extent_t extent;
extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true);
rtree_t *rtree = &test_rtree;
@ -135,7 +135,7 @@ TEST_BEGIN(test_rtree_bits) {
for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, keys[i],
&extent, NSIZES, false),
&extent, SC_NSIZES, false),
"Unexpected rtree_write() failure");
for (unsigned j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) {
assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
@ -166,7 +166,7 @@ TEST_BEGIN(test_rtree_random) {
rtree_ctx_data_init(&rtree_ctx);
extent_t extent;
extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,
extent_init(&extent, NULL, NULL, 0, false, SC_NSIZES, 0,
extent_state_active, false, false, true);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
@ -177,7 +177,8 @@ TEST_BEGIN(test_rtree_random) {
&rtree_ctx, keys[i], false, true);
assert_ptr_not_null(elm,
"Unexpected rtree_leaf_elm_lookup() failure");
rtree_leaf_elm_write(tsdn, rtree, elm, &extent, NSIZES, false);
rtree_leaf_elm_write(tsdn, rtree, elm, &extent, SC_NSIZES,
false);
assert_ptr_eq(rtree_extent_read(tsdn, rtree, &rtree_ctx,
keys[i], true), &extent,
"rtree_extent_read() should return previously set value");

View File

@ -142,11 +142,11 @@ TEST_BEGIN(test_overflow) {
max_size_class = get_max_size_class();
max_psz = max_size_class + PAGE;
assert_u_eq(sz_size2index(max_size_class+1), NSIZES,
assert_u_eq(sz_size2index(max_size_class+1), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), NSIZES,
assert_u_eq(sz_size2index(ZU(PTRDIFF_MAX)+1), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
assert_u_eq(sz_size2index(SIZE_T_MAX), NSIZES,
assert_u_eq(sz_size2index(SIZE_T_MAX), SC_NSIZES,
"sz_size2index() should return NSIZES on overflow");
assert_zu_eq(sz_s2u(max_size_class+1), 0,
@ -156,13 +156,16 @@ TEST_BEGIN(test_overflow) {
assert_zu_eq(sz_s2u(SIZE_T_MAX), 0,
"sz_s2u() should return 0 on overflow");
assert_u_eq(sz_psz2ind(max_size_class+1), NPSIZES,
assert_u_eq(sz_psz2ind(max_size_class+1), sc_data_global.npsizes,
"sz_psz2ind() should return NPSIZES on overflow");
assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES,
assert_u_eq(sz_psz2ind(ZU(PTRDIFF_MAX)+1), sc_data_global.npsizes,
"sz_psz2ind() should return NPSIZES on overflow");
assert_u_eq(sz_psz2ind(SIZE_T_MAX), NPSIZES,
assert_u_eq(sz_psz2ind(SIZE_T_MAX), sc_data_global.npsizes,
"sz_psz2ind() should return NPSIZES on overflow");
assert_u_le(sc_data_global.npsizes, SC_NPSIZES_MAX,
"Dynamic value of npsizes is higher than static bound.");
assert_zu_eq(sz_psz2u(max_size_class+1), max_psz,
"sz_psz2u() should return (LARGE_MAXCLASS + PAGE) for unsupported"
" size");

View File

@ -3,7 +3,7 @@
TEST_BEGIN(test_arena_slab_regind) {
szind_t binind;
for (binind = 0; binind < NBINS; binind++) {
for (binind = 0; binind < SC_NBINS; binind++) {
size_t regind;
extent_t slab;
const bin_info_t *bin_info = &bin_infos[binind];

View File

@ -33,7 +33,7 @@ TEST_BEGIN(test_stats_large) {
size_t sz;
int expected = config_stats ? 0 : ENOENT;
p = mallocx(SMALL_MAXCLASS+1, MALLOCX_ARENA(0));
p = mallocx(sc_data_global.small_maxclass + 1, MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
@ -74,9 +74,10 @@ TEST_BEGIN(test_stats_arenas_summary) {
uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
little = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
little = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0));
assert_ptr_not_null(little, "Unexpected mallocx() failure");
large = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
large = mallocx((1U << sc_data_global.lg_large_minclass),
MALLOCX_ARENA(0));
assert_ptr_not_null(large, "Unexpected mallocx() failure");
dallocx(little, 0);
@ -148,7 +149,7 @@ TEST_BEGIN(test_stats_arenas_small) {
no_lazy_lock(); /* Lazy locking would dodge tcache testing. */
p = mallocx(SMALL_MAXCLASS, MALLOCX_ARENA(0));
p = mallocx(sc_data_global.small_maxclass, MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0),
@ -191,7 +192,7 @@ TEST_BEGIN(test_stats_arenas_large) {
uint64_t epoch, nmalloc, ndalloc;
int expected = config_stats ? 0 : ENOENT;
p = mallocx((1U << LG_LARGE_MINCLASS), MALLOCX_ARENA(0));
p = mallocx((1U << sc_data_global.lg_large_minclass), MALLOCX_ARENA(0));
assert_ptr_not_null(p, "Unexpected mallocx() failure");
assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),

View File

@ -41,13 +41,14 @@ test_zero(size_t sz_min, size_t sz_max) {
TEST_BEGIN(test_zero_small) {
test_skip_if(!config_fill);
test_zero(1, SMALL_MAXCLASS-1);
test_zero(1, sc_data_global.small_maxclass - 1);
}
TEST_END
TEST_BEGIN(test_zero_large) {
test_skip_if(!config_fill);
test_zero(SMALL_MAXCLASS+1, (1U << (LG_LARGE_MINCLASS+1)));
test_zero(sc_data_global.small_maxclass + 1,
1U << (sc_data_global.lg_large_minclass + 1));
}
TEST_END