Header refactoring: Pull size helpers out of jemalloc module.

David Goldblatt authored on 2017-05-30 10:45:37 -07:00; committed by David Goldblatt
parent 041e041e1f
commit 8261e581be
28 changed files with 636 additions and 590 deletions
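The rename is mechanical: the size-class helpers and their lookup tables move into a new sz module (jemalloc/internal/sz.h plus src/sz.c, both added below) and pick up an sz_ prefix. The mapping below is compiled from the hunks that follow, shown as a C comment for reference only:

/*
 * size2index(size)      -> sz_size2index(size)
 * index2size(ind)       -> sz_index2size(ind)
 * s2u(size)             -> sz_s2u(size)
 * sa2u(size, alignment) -> sz_sa2u(size, alignment)
 * psz2ind(psz)          -> sz_psz2ind(psz)
 * pind2sz(pind)         -> sz_pind2sz(pind)
 * psz2u(psz)            -> sz_psz2u(psz)
 * large_pad             -> sz_large_pad
 * index2size_tab        -> sz_index2size_tab
 * size2index_tab        -> sz_size2index_tab
 * pind2sz_tab           -> sz_pind2sz_tab
 */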


@@ -289,7 +289,7 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents;
arena_stats_accum_zu(&astats->allocated_large,
curlextents * index2size(NBINS + i));
curlextents * sz_index2size(NBINS + i));
}
arena_stats_unlock(tsdn, &arena->stats);
@@ -303,12 +303,12 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
for (; i < NBINS; i++) {
tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * index2size(i));
tbin->ncached * sz_index2size(i));
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
arena_stats_accum_zu(&astats->tcache_bytes,
tbin->ncached * index2size(i));
tbin->ncached * sz_index2size(i));
}
}
malloc_mutex_prof_read(tsdn,
@@ -467,7 +467,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
}
index = size2index(usize);
index = sz_size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
arena_stats_add_u64(tsdn, &arena->stats,
@@ -483,7 +483,7 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
if (usize < LARGE_MINCLASS) {
usize = LARGE_MINCLASS;
}
index = size2index(usize);
index = sz_size2index(usize);
hindex = (index >= NBINS) ? index - NBINS : 0;
arena_stats_add_u64(tsdn, &arena->stats,
@@ -505,21 +505,22 @@ arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
szind_t szind = size2index(usize);
szind_t szind = sz_size2index(usize);
size_t mapped_add;
bool commit = true;
extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_dirty, NULL, usize, large_pad, alignment, false,
&arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
szind, zero, &commit);
if (extent == NULL) {
extent = extents_alloc(tsdn, arena, &extent_hooks,
&arena->extents_muzzy, NULL, usize, large_pad, alignment,
&arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
false, szind, zero, &commit);
}
size_t size = usize + large_pad;
size_t size = usize + sz_large_pad;
if (extent == NULL) {
extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
usize, large_pad, alignment, false, szind, zero, &commit);
usize, sz_large_pad, alignment, false, szind, zero,
&commit);
if (config_stats) {
/*
* extent may be NULL on OOM, but in that case
@@ -1146,7 +1147,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
assert(alloc_ctx.szind != NSIZES);
if (config_stats || (config_prof && opt_prof)) {
usize = index2size(alloc_ctx.szind);
usize = sz_index2size(alloc_ctx.szind);
assert(usize == isalloc(tsd_tsdn(tsd), ptr));
}
/* Remove large allocation from prof sample set. */
@@ -1278,7 +1279,7 @@ arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
WITNESS_RANK_CORE, 0);
extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
szind_t szind = size2index(bin_info->reg_size);
szind_t szind = sz_size2index(bin_info->reg_size);
bool zero = false;
bool commit = true;
extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
@@ -1484,7 +1485,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
assert(binind < NBINS);
bin = &arena->bins[binind];
usize = index2size(binind);
usize = sz_index2size(binind);
malloc_mutex_lock(tsdn, &bin->lock);
if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
@@ -1544,7 +1545,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
if (likely(size <= SMALL_MAXCLASS)) {
return arena_malloc_small(tsdn, arena, ind, zero);
}
return large_malloc(tsdn, arena, index2size(ind), zero);
return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
void *
@@ -1555,8 +1556,8 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special slab placement. */
ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true);
} else {
if (likely(alignment <= CACHELINE)) {
ret = large_malloc(tsdn, arena, usize, zero);
@@ -1581,7 +1582,7 @@ arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
(uintptr_t)ptr, true);
arena_t *arena = extent_arena_get(extent);
szind_t szind = size2index(usize);
szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
szind, false);
@@ -1617,7 +1618,7 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
size_t usize = arena_prof_demote(tsdn, extent, ptr);
if (usize <= tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size2index(usize), slow_path);
sz_size2index(usize), slow_path);
} else {
large_dalloc(tsdn, extent);
}
@@ -1751,17 +1752,17 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
}
extent_t *extent = iealloc(tsdn, ptr);
size_t usize_min = s2u(size);
size_t usize_max = s2u(size + extra);
size_t usize_min = sz_s2u(size);
size_t usize_max = sz_s2u(size + extra);
if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
/*
* Avoid moving the allocation if the size class can be left the
* same.
*/
assert(arena_bin_info[size2index(oldsize)].reg_size ==
assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
oldsize);
if ((usize_max > SMALL_MAXCLASS || size2index(usize_max) !=
size2index(oldsize)) && (size > oldsize || usize_max <
if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
sz_size2index(oldsize)) && (size > oldsize || usize_max <
oldsize)) {
return true;
}
@@ -1780,10 +1781,10 @@ static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache) {
if (alignment == 0) {
return arena_malloc(tsdn, arena, usize, size2index(usize),
return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true);
}
usize = sa2u(usize, alignment);
usize = sz_sa2u(usize, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return NULL;
}
@@ -1793,7 +1794,7 @@ arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache) {
size_t usize = s2u(size);
size_t usize = sz_s2u(size);
if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
return NULL;
}
@@ -1998,7 +1999,7 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
goto label_error;
}
arena->extent_grow_next = psz2ind(HUGEPAGE);
arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
goto label_error;


@@ -5,6 +5,7 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
@@ -121,8 +122,8 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor = size2index(extent_bsize_get(extent) + 1) -
1;
szind_t index_floor =
sz_size2index(extent_bsize_get(extent) + 1) - 1;
extent_heap_insert(&base->avail[index_floor], extent);
}
@@ -171,11 +172,11 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
* HUGEPAGE), or a size large enough to satisfy the requested size and
* alignment, whichever is larger.
*/
size_t min_block_size = HUGEPAGE_CEILING(psz2u(header_size + gap_size +
usize));
size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ usize));
pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
*pind_last;
size_t next_block_size = HUGEPAGE_CEILING(pind2sz(pind_next));
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
base_block_t *block = (base_block_t *)base_map(extent_hooks, ind,
@@ -183,7 +184,7 @@ base_block_alloc(extent_hooks_t *extent_hooks, unsigned ind,
if (block == NULL) {
return NULL;
}
*pind_last = psz2ind(block_size);
*pind_last = sz_psz2ind(block_size);
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
@@ -304,7 +305,7 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
extent_t *extent = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = size2index(asize); i < NSIZES; i++) {
for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
extent = extent_heap_remove_first(&base->avail[i]);
if (extent != NULL) {
/* Use existing space. */


@@ -274,7 +274,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh) {
size_t usize;
lg_curcells++;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
@@ -319,7 +319,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return;
}
@@ -395,7 +395,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh->hash = hash;
ckh->keycomp = keycomp;
usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;


@@ -701,7 +701,7 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
for (i = 0; i < NBINS; i++) {
ctl_arena->astats->allocated_small +=
ctl_arena->astats->bstats[i].curregs *
index2size(i);
sz_index2size(i);
ctl_arena->astats->nmalloc_small +=
ctl_arena->astats->bstats[i].nmalloc;
ctl_arena->astats->ndalloc_small +=
@@ -2274,7 +2274,8 @@ arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
}
CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
CTL_RO_NL_GEN(arenas_lextent_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t *
arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
size_t i) {


@@ -220,7 +220,7 @@ extent_size_quantize_floor(size_t size) {
assert(size > 0);
assert((size & PAGE_MASK) == 0);
pind = psz2ind(size - large_pad + 1);
pind = sz_psz2ind(size - sz_large_pad + 1);
if (pind == 0) {
/*
* Avoid underflow. This short-circuit would also do the right
@@ -230,7 +230,7 @@ extent_size_quantize_floor(size_t size) {
*/
return size;
}
ret = pind2sz(pind - 1) + large_pad;
ret = sz_pind2sz(pind - 1) + sz_large_pad;
assert(ret <= size);
return ret;
}
@@ -243,7 +243,7 @@ extent_size_quantize_ceil(size_t size) {
size_t ret;
assert(size > 0);
assert(size - large_pad <= LARGE_MAXCLASS);
assert(size - sz_large_pad <= LARGE_MAXCLASS);
assert((size & PAGE_MASK) == 0);
ret = extent_size_quantize_floor(size);
@@ -256,7 +256,8 @@ extent_size_quantize_ceil(size_t size) {
* search would potentially find sufficiently aligned available
* memory somewhere lower.
*/
ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
sz_large_pad;
}
return ret;
}
@@ -300,7 +301,7 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = psz2ind(psz);
pszind_t pind = sz_psz2ind(psz);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_unset(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
@@ -329,7 +330,7 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
size_t size = extent_size_get(extent);
size_t psz = extent_size_quantize_floor(size);
pszind_t pind = psz2ind(psz);
pszind_t pind = sz_psz2ind(psz);
extent_heap_remove(&extents->heaps[pind], extent);
if (extent_heap_empty(&extents->heaps[pind])) {
bitmap_set(extents->bitmap, &extents_bitmap_info,
@@ -354,7 +355,7 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
pszind_t pind = psz2ind(extent_size_quantize_ceil(size));
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
(size_t)pind);
if (i < NPSIZES+1) {
@@ -376,7 +377,7 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
size_t size) {
extent_t *ret = NULL;
pszind_t pind = psz2ind(extent_size_quantize_ceil(size));
pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
&extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
(pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
@@ -1040,7 +1041,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* satisfy this request.
*/
pszind_t egn_skip = 0;
size_t alloc_size = pind2sz(arena->extent_grow_next + egn_skip);
size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
while (alloc_size < alloc_size_min) {
egn_skip++;
if (arena->extent_grow_next + egn_skip == NPSIZES) {
@@ -1048,7 +1049,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
goto label_err;
}
assert(arena->extent_grow_next + egn_skip < NPSIZES);
alloc_size = pind2sz(arena->extent_grow_next + egn_skip);
alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
}
extent_t *extent = extent_alloc(tsdn, arena);
@@ -1369,7 +1370,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
extent_unlock(tsdn, prev);
if (can_coalesce && !extent_coalesce(tsdn, arena,
r_extent_hooks, extents, extent, prev, false,
growing_retained)) {
extent = prev;
if (extents->delay_coalesce) {


@@ -13,6 +13,7 @@
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
@@ -107,110 +108,6 @@ enum {
};
static uint8_t malloc_slow_flags;
JEMALLOC_ALIGNED(CACHELINE)
const size_t pind2sz_tab[NPSIZES+1] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
PSZ_##psz(lg_grp, ndelta, lg_delta)
SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
(LARGE_MAXCLASS + PAGE)
};
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
@@ -333,7 +230,7 @@ a0ialloc(size_t size, bool zero, bool is_internal) {
return NULL;
}
return iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
is_internal, arena_get(TSDN_NULL, 0, true), true);
}
@@ -1687,10 +1584,11 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
size_t bumped_usize = usize;
if (usize <= SMALL_MAXCLASS) {
assert(((dopts->alignment == 0) ? s2u(LARGE_MINCLASS) :
sa2u(LARGE_MINCLASS, dopts->alignment)) == LARGE_MINCLASS);
ind_large = size2index(LARGE_MINCLASS);
bumped_usize = s2u(LARGE_MINCLASS);
assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
sz_sa2u(LARGE_MINCLASS, dopts->alignment))
== LARGE_MINCLASS);
ind_large = sz_size2index(LARGE_MINCLASS);
bumped_usize = sz_s2u(LARGE_MINCLASS);
ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
bumped_usize, ind_large);
if (unlikely(ret == NULL)) {
@@ -1792,16 +1690,16 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
/* This is the beginning of the "core" algorithm. */
if (dopts->alignment == 0) {
ind = size2index(size);
ind = sz_size2index(size);
if (unlikely(ind >= NSIZES)) {
goto label_oom;
}
if (config_stats || (config_prof && opt_prof)) {
usize = index2size(ind);
usize = sz_index2size(ind);
assert(usize > 0 && usize <= LARGE_MAXCLASS);
}
} else {
usize = sa2u(size, dopts->alignment);
usize = sz_sa2u(size, dopts->alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
goto label_oom;
}
@@ -2155,10 +2053,10 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
size_t usize;
if (config_prof && opt_prof) {
usize = index2size(alloc_ctx.szind);
usize = sz_index2size(alloc_ctx.szind);
prof_free(tsd, ptr, usize, &alloc_ctx);
} else if (config_stats) {
usize = index2size(alloc_ctx.szind);
usize = sz_index2size(alloc_ctx.szind);
}
if (config_stats) {
*tsd_thread_deallocatedp_get(tsd) += usize;
@@ -2192,7 +2090,7 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind == size2index(usize));
assert(alloc_ctx.szind == sz_size2index(usize));
ctx = &alloc_ctx;
prof_free(tsd, ptr, usize, ctx);
} else {
@@ -2247,16 +2145,16 @@ je_realloc(void *ptr, size_t size) {
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
old_usize = index2size(alloc_ctx.szind);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
usize = s2u(size);
usize = sz_s2u(size);
ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
NULL : irealloc_prof(tsd, ptr, old_usize, usize,
&alloc_ctx);
} else {
if (config_stats) {
usize = s2u(size);
usize = sz_s2u(size);
}
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
@@ -2601,10 +2499,11 @@ je_rallocx(void *ptr, size_t size, int flags) {
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
old_usize = index2size(alloc_ctx.szind);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
usize = (alignment == 0) ?
sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
goto label_oom;
}
@@ -2685,10 +2584,10 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
* prof_realloc() will use the actual usize to decide whether to sample.
*/
if (alignment == 0) {
usize_max = s2u(size+extra);
usize_max = sz_s2u(size+extra);
assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
} else {
usize_max = sa2u(size+extra, alignment);
usize_max = sz_sa2u(size+extra, alignment);
if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
/*
* usize_max is out of range, and chances are that
@@ -2737,7 +2636,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
assert(alloc_ctx.szind != NSIZES);
old_usize = index2size(alloc_ctx.szind);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
/*
* The API explicitly absolves itself of protecting against (size +
@@ -2847,9 +2746,9 @@ inallocx(tsdn_t *tsdn, size_t size, int flags) {
size_t usize;
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
usize = s2u(size);
usize = sz_s2u(size);
} else {
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
}
witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
return usize;


@@ -12,7 +12,7 @@
void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
assert(usize == s2u(usize));
assert(usize == sz_s2u(usize));
return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}
@@ -27,7 +27,7 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
assert(!tsdn_null(tsdn) || arena != NULL);
ausize = sa2u(usize, alignment);
ausize = sz_sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) {
return NULL;
}
@@ -97,7 +97,7 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
arena_t *arena = extent_arena_get(extent);
size_t oldusize = extent_usize_get(extent);
extent_hooks_t *extent_hooks = extent_hooks_get(arena);
size_t diff = extent_size_get(extent) - (usize + large_pad);
size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
assert(oldusize > usize);
@@ -108,8 +108,8 @@ large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
/* Split excess pages. */
if (diff != 0) {
extent_t *trail = extent_split_wrapper(tsdn, arena,
&extent_hooks, extent, usize + large_pad, size2index(usize),
false, diff, NSIZES, false);
&extent_hooks, extent, usize + sz_large_pad,
sz_size2index(usize), false, diff, NSIZES, false);
if (trail == NULL) {
return true;
}
@@ -178,7 +178,7 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
}
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = size2index(usize);
szind_t szind = sz_size2index(usize);
extent_szind_set(extent, szind);
rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)extent_addr_get(extent), szind, false);


@@ -556,7 +556,7 @@ prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
*/
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
true);
if (gctx == NULL) {
return NULL;
@@ -819,7 +819,7 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
/* Link a prof_tctx_t into gctx for this thread. */
ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
size2index(sizeof(prof_tctx_t)), false, NULL, true,
sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
arena_ichoose(tsd, NULL), true);
if (ret.p == NULL) {
if (new_gctx) {
@@ -1899,7 +1899,7 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
/* Initialize an empty cache for this thread. */
tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
size2index(sizeof(prof_tdata_t)), false, NULL, true,
sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (tdata == NULL) {
return NULL;
@@ -2135,7 +2135,7 @@ prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
return "";
}
ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
arena_get(TSDN_NULL, 0, true), true);
if (ret == NULL) {
return NULL;

src/sz.c (new file, 106 lines)

@@ -0,0 +1,106 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
const size_t sz_pind2sz_tab[NPSIZES+1] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
PSZ_##psz(lg_grp, ndelta, lg_delta)
SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
(LARGE_MAXCLASS + PAGE)
};
JEMALLOC_ALIGNED(CACHELINE)
const size_t sz_index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
SIZE_CLASSES
#undef SC
};
JEMALLOC_ALIGNED(CACHELINE)
const uint8_t sz_size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
S2B_##lg_delta_lookup(index)
SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
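For intuition about the table-driven mapping defined above, here is a simplified, self-contained sketch of the same idea; the demo_* names and the eight power-of-two classes are made up for illustration and do not correspond to jemalloc's generated SIZE_CLASSES or its sz_* implementation.

#include <assert.h>
#include <stddef.h>

/* Hypothetical size classes: 8, 16, 32, ..., 1024 bytes. */
static const size_t demo_index2size_tab[] = {
	8, 16, 32, 64, 128, 256, 512, 1024
};

/*
 * Smallest class index whose size can hold the request (linear scan for
 * clarity; the real helpers use a lookup table plus a computed path).
 */
static size_t
demo_size2index(size_t size) {
	for (size_t i = 0;
	    i < sizeof(demo_index2size_tab) / sizeof(demo_index2size_tab[0]);
	    i++) {
		if (size <= demo_index2size_tab[i]) {
			return i;
		}
	}
	return (size_t)-1;	/* No class large enough. */
}

int
main(void) {
	size_t ind = demo_size2index(100);
	/* Round trip: index2size(size2index(s)) >= s. */
	assert(ind == 4 && demo_index2size_tab[ind] == 128);
	return 0;
}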


@@ -383,7 +383,7 @@ tsd_tcache_data_init(tsd_t *tsd) {
assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
size_t size = stack_nelms * sizeof(void *);
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
size = sz_sa2u(size, CACHELINE);
void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
NULL, true, arena_get(TSDN_NULL, 0, true));
@@ -430,7 +430,7 @@ tcache_create_explicit(tsd_t *tsd) {
stack_offset = size;
size += stack_nelms * sizeof(void *);
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
size = sz_sa2u(size, CACHELINE);
tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
arena_get(TSDN_NULL, 0, true));
@@ -655,7 +655,7 @@ tcache_boot(tsdn_t *tsdn) {
return true;
}
nhbins = size2index(tcache_maxclass) + 1;
nhbins = sz_size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins


@@ -244,7 +244,7 @@ zone_good_size(malloc_zone_t *zone, size_t size) {
if (size == 0) {
size = 1;
}
return s2u(size);
return sz_s2u(size);
}
static kern_return_t