Initialize arena_bin_info at compile time rather than at boot time.

This resolves #370.
Jason Evans 2016-04-07 08:04:12 -04:00
parent b683734b43
commit 627372b459
5 changed files with 100 additions and 96 deletions
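The heart of the change is visible in src/arena.c below: the boot-time bin_info_init()/bin_info_run_size_calc() pair is deleted, and arena_bin_info becomes a const table whose initializer is expanded by the preprocessor from the SIZE_CLASSES X-macro. The sketch below shows that general technique in isolation; the BIN() rows, field values, and 4096-byte page size are illustrative stand-ins, not jemalloc's definitions.

    #include <stdio.h>

    /* Hypothetical class list: BIN(index, reg_size, run_pages). */
    #define BIN_CLASSES \
        BIN(0,  8, 1)   \
        BIN(1, 16, 1)   \
        BIN(2, 48, 3)

    typedef struct {
        unsigned reg_size; /* region (object) size in bytes */
        unsigned run_size; /* run size in bytes */
        unsigned nregs;    /* regions per run */
    } bin_info_t;

    /*
     * Each BIN() row expands to one element initializer, so the whole table
     * is computed by the compiler and can live in read-only storage.
     */
    static const bin_info_t bin_infos[] = {
    #define BIN(index, reg_size, pgs) \
        {reg_size, (pgs) * 4096, ((pgs) * 4096) / (reg_size)},
        BIN_CLASSES
    #undef BIN
    };

    int
    main(void)
    {
        for (size_t i = 0; i < sizeof(bin_infos) / sizeof(bin_infos[0]); i++) {
            printf("bin %zu: reg=%u run=%u nregs=%u\n", i,
                bin_infos[i].reg_size, bin_infos[i].run_size,
                bin_infos[i].nregs);
        }
        return (0);
    }

Because every element is then a constant expression, the table can carry the const qualifier, which is why most of the diff below is mechanical constification of arena_bin_info_t pointers.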

include/jemalloc/internal/arena.h

@@ -470,7 +470,7 @@ extern const char *purge_mode_names[];
 extern ssize_t opt_lg_dirty_mult;
 extern ssize_t opt_decay_time;
-extern arena_bin_info_t arena_bin_info[NBINS];
+extern const arena_bin_info_t arena_bin_info[NBINS];
 extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t map_misc_offset;
@@ -511,13 +511,13 @@ void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
 void arena_reset(tsd_t *tsd, arena_t *arena);
 void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
     tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
+void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info,
     bool zero);
 #ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
+typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *);
 extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
 #else
-void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+void arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
 #endif
 void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
     bool zero);
@@ -634,7 +634,7 @@ bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
 bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
 szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
 szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
-size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
+size_t arena_run_regind(arena_run_t *run, const arena_bin_info_t *bin_info,
     const void *ptr);
 prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
 void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
@@ -1058,7 +1058,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
     const arena_run_t *run;
     arena_bin_t *bin;
     szind_t run_binind, actual_binind;
-    arena_bin_info_t *bin_info;
+    const arena_bin_info_t *bin_info;
     const arena_chunk_map_misc_t *miscelm;
     const void *rpages;
@@ -1099,7 +1099,8 @@ arena_bin_index(arena_t *arena, arena_bin_t *bin)
 }

 JEMALLOC_INLINE size_t
-arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
+arena_run_regind(arena_run_t *run, const arena_bin_info_t *bin_info,
+    const void *ptr)
 {
     size_t diff, interval, shift, regind;
     arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);

include/jemalloc/internal/size_classes.sh

@@ -40,6 +40,36 @@ lg() {
     done
 }

+run_size() {
+    lg_p=$1
+    lg_grp=$2
+    lg_delta=$3
+    ndelta=$4
+
+    pow2 ${lg_p}; p=${pow2_result}
+    pow2 ${lg_grp}; grp=${pow2_result}
+    pow2 ${lg_delta}; delta=${pow2_result}
+    reg_size=$((${grp} + ${delta}*${ndelta}))
+
+    # Compute smallest run size that is an integer multiple of reg_size.
+    try_run_size=${p}
+    try_nregs=$((${try_run_size} / ${reg_size}))
+    perfect=0
+    while [ ${perfect} -eq 0 ] ; do
+        perfect_run_size=${try_run_size}
+        perfect_nregs=${try_nregs}
+
+        try_run_size=$((${try_run_size} + ${p}))
+        try_nregs=$((${try_run_size} / ${reg_size}))
+        if [ ${perfect_run_size} -eq $((${perfect_nregs} * ${reg_size})) ] ; then
+            perfect=1
+        fi
+    done
+
+    run_size_pgs=$((${perfect_run_size} / ${p}))
+}
+
 size_class() {
     index=$1
     lg_grp=$2
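The new run_size() helper ports the run-size computation into the size-class generator; the C original, bin_info_run_size_calc() in src/arena.c, is deleted later in this diff. Starting at one page, it grows the candidate run a page at a time until reg_size divides it exactly, then reports the page count in run_size_pgs. A hedged C rendering of the same loop, with illustrative inputs:

    #include <stdio.h>

    /*
     * Equivalent of the generator's run_size() loop: the smallest number of
     * pages whose total size reg_size divides evenly.
     */
    static unsigned
    run_pages(unsigned page_size, unsigned reg_size)
    {
        unsigned run_size = page_size;

        while (run_size % reg_size != 0)
            run_size += page_size;
        return (run_size / page_size);
    }

    int
    main(void)
    {
        /*
         * 48-byte regions on 4 KiB pages: 4096 and 8192 leave remainders,
         * but 12288 == 256 * 48, so the run is 3 pages.
         */
        printf("%u\n", run_pages(4096, 48)); /* prints 3 */
        return (0);
    }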
@@ -65,8 +95,10 @@ size_class() {
     if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
         bin="yes"
+        run_size ${lg_p} ${lg_grp} ${lg_delta} ${ndelta}; pgs=${run_size_pgs}
     else
         bin="no"
+        pgs=0
     fi
     if [ ${lg_size} -lt ${lg_kmax} \
         -o ${lg_size} -eq ${lg_kmax} -a ${rem} = "no" ] ; then
@@ -74,10 +106,11 @@ size_class() {
     else
         lg_delta_lookup="no"
     fi
-    printf '    SC(%3d, %6d, %8d, %6d, %3s, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${lg_delta_lookup}
+    printf '    SC(%3d, %6d, %8d, %6d, %3s, %3d, %2s) \\\n' ${index} ${lg_grp} ${lg_delta} ${ndelta} ${bin} ${pgs} ${lg_delta_lookup}
     # Defined upon return:
-    # - lg_delta_lookup (${lg_delta} or "no")
     # - bin ("yes" or "no")
+    # - pgs
+    # - lg_delta_lookup (${lg_delta} or "no")
 }
@@ -95,12 +128,13 @@ size_classes() {
     pow2 ${lg_g}; g=${pow2_result}

     echo "#define SIZE_CLASSES \\"
-    echo "  /* index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup */ \\"
+    echo "  /* index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup */ \\"
     ntbins=0
     nlbins=0
     lg_tiny_maxclass='"NA"'
     nbins=0
+    slab_maxpgs=0

     # Tiny size classes.
     ndelta=0
@@ -114,6 +148,9 @@ size_classes() {
         fi
         if [ ${bin} != "no" ] ; then
             nbins=$((${index} + 1))
+            if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+                slab_maxpgs=${pgs}
+            fi
         fi
         ntbins=$((${ntbins} + 1))
         lg_tiny_maxclass=${lg_grp} # Final written value is correct.
@@ -133,11 +170,17 @@ size_classes() {
         index=$((${index} + 1))
         lg_grp=$((${lg_grp} + 1))
         lg_delta=$((${lg_delta} + 1))
+        if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+            slab_maxpgs=${pgs}
+        fi
     fi
     while [ ${ndelta} -lt ${g} ] ; do
         size_class ${index} ${lg_grp} ${lg_delta} ${ndelta} ${lg_p} ${lg_kmax}
         index=$((${index} + 1))
         ndelta=$((${ndelta} + 1))
+        if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+            slab_maxpgs=${pgs}
+        fi
     done
@@ -161,6 +204,9 @@ size_classes() {
             nbins=$((${index} + 1))
             # Final written value is correct:
             small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
+            if [ ${pgs} -gt ${slab_maxpgs} ] ; then
+                slab_maxpgs=${pgs}
+            fi
     if [ ${lg_g} -gt 0 ] ; then
         lg_large_minclass=$((${lg_grp} + 1))
     else
@@ -186,6 +232,7 @@ size_classes() {
     # - lg_tiny_maxclass
     # - lookup_maxclass
     # - small_maxclass
+    # - slab_maxpgs
     # - lg_large_minclass
     # - huge_maxclass
 }
@@ -200,14 +247,14 @@ cat <<EOF
  * be defined prior to inclusion, and it in turn defines:
  *
  * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling.
- * SIZE_CLASSES: Complete table of
- *   SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup)
- *   tuples.
+ * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, bin,
+ *   pgs, lg_delta_lookup) tuples.
  *   index: Size class index.
  *   lg_grp: Lg group base size (no deltas added).
  *   lg_delta: Lg delta to previous size class.
  *   ndelta: Delta multiplier.  size == 1<<lg_grp + ndelta<<lg_delta
  *   bin: 'yes' if a small bin size class, 'no' otherwise.
+ *   pgs: Run page count if a small bin size class, 0 otherwise.
  *   lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no'
  *       otherwise.
  * NTBINS: Number of tiny bins.
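To make the widened tuple concrete, consider a hypothetical 48-byte class on 4 KiB pages with lg_grp = 5, lg_delta = 4, ndelta = 1: size = (1 << 5) + (1 << 4) = 48; it is a small bin (bin = yes); its smallest evenly divisible run is 3 pages, since 12288 = 256 * 48, so pgs = 3; and if the class sits within the lookup range, lg_delta_lookup = 4. (The index depends on NTBINS and the preceding classes, so it is left out of this example.)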
@@ -217,6 +264,7 @@ cat <<EOF
  * LG_TINY_MAXCLASS: Lg of maximum tiny size class.
  * LOOKUP_MAXCLASS: Maximum size class included in lookup table.
  * SMALL_MAXCLASS: Maximum small size class.
+ * SLAB_MAXPGS: Maximum pages in small size class run.
  * LG_LARGE_MINCLASS: Lg of minimum large size class.
  * HUGE_MAXCLASS: Maximum (huge) size class.
  */
@@ -241,6 +289,7 @@ for lg_z in ${lg_zarr} ; do
     echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
     echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
     echo "#define SMALL_MAXCLASS ${small_maxclass}"
+    echo "#define SLAB_MAXPGS ${slab_maxpgs}"
     echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
     echo "#define HUGE_MAXCLASS ${huge_maxclass}"
     echo "#endif"

src/arena.c

@@ -15,14 +15,25 @@ static ssize_t lg_dirty_mult_default;
 ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
 static ssize_t decay_time_default;

-arena_bin_info_t arena_bin_info[NBINS];
+const arena_bin_info_t arena_bin_info[NBINS] = {
+#define BIN_INFO_bin_yes(reg_size, run_size, nregs) \
+    {reg_size, run_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, run_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
+    BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
+        (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
+        (ndelta<<lg_delta)))
+    SIZE_CLASSES
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
+};

 size_t map_bias;
 size_t map_misc_offset;
 size_t arena_maxrun; /* Max run size for arenas. */
 size_t large_maxclass; /* Max large size class. */
 size_t run_quantize_max; /* Max run_quantize_*() input. */
-static size_t small_maxrun; /* Max run size for small size classes. */
 static bool *small_run_tab; /* Valid small run page multiples. */
 static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */
 static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
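The initializer above picks an expansion by token-pasting the bin field onto BIN_INFO_bin_: a "yes" row becomes an element initializer, a "no" row expands to nothing, so only small bin classes occupy slots. A self-contained sketch of that dispatch follows; the SC() rows and struct here are invented for illustration:

    #include <stdio.h>

    /* Invented 7-field rows in the new SC() shape. */
    #define MY_CLASSES \
        SC(0, 3, 3, 0, yes, 1, 3) \
        SC(1, 3, 3, 1, yes, 1, 3) \
        SC(2, 6, 4, 1, no,  0, no)

    #define ROW_bin_yes(size, pgs) {size, pgs},
    #define ROW_bin_no(size, pgs) /* non-bin classes expand to nothing */

    typedef struct { unsigned size, pgs; } row_t;

    static const row_t rows[] = {
    #define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
        ROW_bin_##bin((1U << lg_grp) + ((unsigned)ndelta << lg_delta), pgs)
        MY_CLASSES
    #undef SC
    };

    int
    main(void)
    {
        /* Only the two "yes" rows produced initializers. */
        printf("%zu rows\n", sizeof(rows) / sizeof(rows[0]));
        return (0);
    }

Note also that BITMAP_INFO_INITIALIZER(nregs) takes over the job that bitmap_info_init() used to do at boot: compile-time initialization is only possible because the bitmap metadata can likewise be written as a constant initializer.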
@@ -86,7 +97,8 @@ run_quantize_floor_compute(size_t size)
     assert(size == PAGE_CEILING(size));

     /* Don't change sizes that are valid small run sizes. */
-    if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
+    if (size <= (ZU(SLAB_MAXPGS) << LG_PAGE) && small_run_tab[size >>
+        LG_PAGE])
         return (size);

     /*
@@ -121,12 +133,12 @@ run_quantize_ceil_compute_hard(size_t size)
         large_pad) + 1) + large_pad);
     } else
         large_run_size_next = SIZE_T_MAX;
-    if (size >= small_maxrun)
+    if ((size >> LG_PAGE) >= ZU(SLAB_MAXPGS))
         return (large_run_size_next);

     while (true) {
         size += PAGE;
-        assert(size <= small_maxrun);
+        assert(size <= (ZU(SLAB_MAXPGS) << LG_PAGE));
         if (small_run_tab[size >> LG_PAGE]) {
             if (large_run_size_next < size)
                 return (large_run_size_next);
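Both rewrites above replace small_maxrun, which was computed at boot, with the generated constant SLAB_MAXPGS. For page-multiple sizes the byte-level bound and the page-count bound are interchangeable, which is why one site compares size against (SLAB_MAXPGS << LG_PAGE) while the other compares (size >> LG_PAGE) against SLAB_MAXPGS. A minimal sketch of that equivalence, with illustrative constants:

    #include <assert.h>
    #include <stddef.h>

    #define LG_PAGE 12              /* illustrative: 4 KiB pages */
    #define SLAB_MAXPGS ((size_t)5) /* illustrative: the real value is generated */

    int
    main(void)
    {
        /* For page-multiple sizes, the byte and page forms of the bound agree. */
        for (size_t pgs = 1; pgs <= 2 * SLAB_MAXPGS; pgs++) {
            size_t size = pgs << LG_PAGE;

            assert((size <= (SLAB_MAXPGS << LG_PAGE)) ==
                ((size >> LG_PAGE) <= SLAB_MAXPGS));
        }
        return (0);
    }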
@@ -301,7 +313,7 @@ arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
 }

 JEMALLOC_INLINE_C void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
+arena_run_reg_alloc(arena_run_t *run, const arena_bin_info_t *bin_info)
 {
     void *ret;
     size_t regind;
@@ -327,7 +339,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
     size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     size_t mapbits = arena_mapbits_get(chunk, pageind);
     szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
-    arena_bin_info_t *bin_info = &arena_bin_info[binind];
+    const arena_bin_info_t *bin_info = &arena_bin_info[binind];
     size_t regind = arena_run_regind(run, bin_info, ptr);

     assert(run->nfree < bin_info->nregs);
@@ -1822,7 +1834,7 @@ arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
             /* Skip small run. */
             size_t binind = arena_mapbits_binind_get(chunk,
                 pageind);
-            arena_bin_info_t *bin_info =
+            const arena_bin_info_t *bin_info =
                 &arena_bin_info[binind];
             npages = bin_info->run_size >> LG_PAGE;
         }
@@ -2045,7 +2057,7 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
         assert(size == PAGE || arena_mapbits_large_size_get(chunk,
             run_ind+(size>>LG_PAGE)-1) == 0);
     } else {
-        arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
+        const arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
         size = bin_info->run_size;
     }
@@ -2241,7 +2253,7 @@ arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 {
     arena_run_t *run;
     szind_t binind;
-    arena_bin_info_t *bin_info;
+    const arena_bin_info_t *bin_info;

     /* Look for a usable run. */
     run = arena_bin_nonfull_run_tryget(bin);
@@ -2291,7 +2303,7 @@ static void *
 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
 {
     szind_t binind;
-    arena_bin_info_t *bin_info;
+    const arena_bin_info_t *bin_info;
     arena_run_t *run;

     binind = arena_bin_index(arena, bin);
@@ -2390,7 +2402,7 @@ arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
 }

 void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero)
 {

     if (!zero)
@@ -2402,7 +2414,7 @@ arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
 #define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
 #endif
 void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info)
 {

     memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
@@ -2706,7 +2718,7 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
     else {
         szind_t binind = arena_bin_index(extent_node_arena_get(
             &chunk->node), bin);
-        arena_bin_info_t *bin_info = &arena_bin_info[binind];
+        const arena_bin_info_t *bin_info = &arena_bin_info[binind];

         /*
          * The following block's conditional is necessary because if the
@@ -2768,7 +2780,7 @@ arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
     size_t pageind, rpages_ind;
     arena_run_t *run;
     arena_bin_t *bin;
-    arena_bin_info_t *bin_info;
+    const arena_bin_info_t *bin_info;
     szind_t binind;

     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
@@ -3483,81 +3495,24 @@ arena_new(tsdn_t *tsdn, unsigned ind)
     return (arena);
 }

-/*
- * Calculate bin_info->run_size such that it meets the following constraints:
- *
- * *) bin_info->run_size <= arena_maxrun
- * *) bin_info->nregs <= RUN_MAXREGS
- *
- * bin_info->nregs is also calculated here, since these settings are all
- * interdependent.
- */
-static void
-bin_info_run_size_calc(arena_bin_info_t *bin_info)
-{
-    size_t try_run_size, perfect_run_size, actual_run_size;
-    uint32_t try_nregs, perfect_nregs, actual_nregs;
-
-    /* Compute smallest run size that is an integer multiple of reg_size. */
-    try_run_size = PAGE;
-    try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
-    do {
-        perfect_run_size = try_run_size;
-        perfect_nregs = try_nregs;
-
-        try_run_size += PAGE;
-        try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
-    } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
-    assert(perfect_run_size <= arena_maxrun);
-    assert(perfect_nregs <= RUN_MAXREGS);
-
-    actual_run_size = perfect_run_size;
-    actual_nregs = (uint32_t)((actual_run_size) / bin_info->reg_size);
-
-    /* Copy final settings. */
-    bin_info->run_size = actual_run_size;
-    bin_info->nregs = actual_nregs;
-
-    if (actual_run_size > small_maxrun)
-        small_maxrun = actual_run_size;
-}
-
-static void
-bin_info_init(void)
-{
-    arena_bin_info_t *bin_info;
-
-#define BIN_INFO_INIT_bin_yes(index, size) \
-    bin_info = &arena_bin_info[index]; \
-    bin_info->reg_size = size; \
-    bin_info_run_size_calc(bin_info); \
-    bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
-#define BIN_INFO_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
-    BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
-    SIZE_CLASSES
-#undef BIN_INFO_INIT_bin_yes
-#undef BIN_INFO_INIT_bin_no
-#undef SC
-}
-
 static bool
 small_run_size_init(void)
 {

-    assert(small_maxrun != 0);
+    assert(SLAB_MAXPGS != 0);

-    small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
-        LG_PAGE));
+    small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * SLAB_MAXPGS);
     if (small_run_tab == NULL)
         return (true);

 #define TAB_INIT_bin_yes(index, size) { \
-        arena_bin_info_t *bin_info = &arena_bin_info[index]; \
+        const arena_bin_info_t *bin_info = \
+            &arena_bin_info[index]; \
         small_run_tab[bin_info->run_size >> LG_PAGE] = true; \
     }
 #define TAB_INIT_bin_no(index, size)
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, run_size, \
+    lg_delta_lookup) \
     TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
     SIZE_CLASSES
 #undef TAB_INIT_bin_yes
@@ -3643,7 +3598,6 @@ arena_boot(void)
     nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
     nhclasses = NSIZES - nlclasses - NBINS;

-    bin_info_init();
     if (small_run_size_init())
         return (true);
     if (run_quantize_init())

src/jemalloc.c

@@ -81,7 +81,7 @@ static uint8_t malloc_slow_flags;
 /* Last entry for overflow detection only. */
 JEMALLOC_ALIGNED(CACHELINE)
 const size_t index2size_tab[NSIZES+1] = {
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
     ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
     SIZE_CLASSES
 #undef SC
@@ -154,7 +154,7 @@ const uint8_t size2index_tab[] = {
 #define S2B_11(i) S2B_10(i) S2B_10(i)
 #endif
 #define S2B_no(i)
-#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
+#define SC(index, lg_grp, lg_delta, ndelta, bin, pgs, lg_delta_lookup) \
     S2B_##lg_delta_lookup(index)
     SIZE_CLASSES
 #undef S2B_3

test/unit/junk.c

@@ -23,7 +23,7 @@ watch_junking(void *p)
 }

 static void
-arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info)
+arena_dalloc_junk_small_intercept(void *ptr, const arena_bin_info_t *bin_info)
 {
     size_t i;