Use "lg" prefix rather than "2pow" suffix to indicate base 2 logarithms.
Remove the default definition for SIZEOF_INT_2POW.
commit 94ad2b57c3
parent d64d4448cd
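
The change is a mechanical rename: every macro or variable that stores a base-2 logarithm drops its "_2POW"/"_2pow" suffix and gains an "LG_"/"lg_" prefix, with values unchanged. A minimal sketch of the pattern, using identifiers that appear in the hunks below:

/* Before: log2 values carry a "2POW" suffix. */
#define QUANTUM_2POW	4
#define QUANTUM		((size_t)(1U << QUANTUM_2POW))

/* After: the same quantities carry an "lg" prefix. */
#define LG_QUANTUM	4
#define QUANTUM		((size_t)(1U << LG_QUANTUM))
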
@@ -102,16 +102,25 @@ if test "x$EXTRA_CFLAGS" != "x" ; then
 fi
 AC_PROG_CPP
 
-dnl sizeof_ptr is needed by the build system.
 AC_CHECK_SIZEOF([void *])
 if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
-  SIZEOF_PTR_2POW=3
+  LG_SIZEOF_PTR=3
 elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
-  SIZEOF_PTR_2POW=2
+  LG_SIZEOF_PTR=2
 else
   AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
 fi
-AC_DEFINE_UNQUOTED([SIZEOF_PTR_2POW], [$SIZEOF_PTR_2POW])
+AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])
 
+AC_CHECK_SIZEOF([int])
+if test "x${ac_cv_sizeof_int}" = "x8" ; then
+  LG_SIZEOF_INT=3
+elif test "x${ac_cv_sizeof_int}" = "x4" ; then
+  LG_SIZEOF_INT=2
+else
+  AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT])
+
 AC_CANONICAL_HOST
 dnl CPU-specific settings.
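
Note: with the fallback definition of SIZEOF_INT_2POW removed from the C source (see the header hunk at the end), the value now always comes from the new configure check above, which defines LG_SIZEOF_INT such that sizeof(int) == 2^LG_SIZEOF_INT. A small sketch of that invariant, with a hypothetical value standing in for what configure would substitute:

#include <assert.h>

#define LG_SIZEOF_INT 2	/* hypothetical; configure substitutes the real value */

int
main(void)
{

	/* The relationship the configure check establishes. */
	assert(sizeof(int) == (1U << LG_SIZEOF_INT));
	return (0);
}
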
@@ -201,45 +201,40 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.183 2008/12/01 10:20:59 jas
 #define STRERROR_BUF 64
 
 /*
- * Minimum alignment of allocations is 2^QUANTUM_2POW bytes.
+ * Minimum alignment of allocations is 2^LG_QUANTUM bytes.
  */
 #ifdef __i386__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 #ifdef __ia64__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 #ifdef __alpha__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 #ifdef __sparc__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 #ifdef __amd64__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 #ifdef __arm__
-# define QUANTUM_2POW 3
+# define LG_QUANTUM 3
 #endif
 #ifdef __mips__
-# define QUANTUM_2POW 3
+# define LG_QUANTUM 3
 #endif
 #ifdef __powerpc__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 #ifdef __s390x__
-# define QUANTUM_2POW 4
+# define LG_QUANTUM 4
 #endif
 
-#define QUANTUM ((size_t)(1U << QUANTUM_2POW))
+#define QUANTUM ((size_t)(1U << LG_QUANTUM))
 #define QUANTUM_MASK (QUANTUM - 1)
 
-#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
+#define SIZEOF_PTR (1U << LG_SIZEOF_PTR)
 
-/* sizeof(int) == (1U << SIZEOF_INT_2POW). */
-#ifndef SIZEOF_INT_2POW
-# define SIZEOF_INT_2POW 2
-#endif
-
 /* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
 #if (!defined(PIC) && !defined(NO_TLS))
@@ -250,7 +245,7 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.183 2008/12/01 10:20:59 jas
  * Size and alignment of memory chunks that are allocated by the OS's virtual
  * memory system.
  */
-#define CHUNK_2POW_DEFAULT 22
+#define LG_CHUNK_DEFAULT 22
 
 /* Maximum number of dirty pages per arena. */
 #define DIRTY_MAX_DEFAULT (1U << 9)
@@ -259,8 +254,8 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.183 2008/12/01 10:20:59 jas
  * Maximum size of L1 cache line. This is used to avoid cache line aliasing.
  * In addition, this controls the spacing of cacheline-spaced size classes.
  */
-#define CACHELINE_2POW 6
-#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
+#define LG_CACHELINE 6
+#define CACHELINE ((size_t)(1U << LG_CACHELINE))
 #define CACHELINE_MASK (CACHELINE - 1)
 
 /*
@@ -270,13 +265,13 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.183 2008/12/01 10:20:59 jas
 * There must be at least 4 subpages per page, due to the way size classes are
 * handled.
 */
-#define SUBPAGE_2POW 8
-#define SUBPAGE ((size_t)(1U << SUBPAGE_2POW))
+#define LG_SUBPAGE 8
+#define SUBPAGE ((size_t)(1U << LG_SUBPAGE))
 #define SUBPAGE_MASK (SUBPAGE - 1)
 
 #ifdef JEMALLOC_TINY
 /* Smallest size class to support. */
-# define TINY_MIN_2POW 1
+# define LG_TINY_MIN 1
 #endif
 
 /*
@@ -284,20 +279,20 @@ __FBSDID("$FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.183 2008/12/01 10:20:59 jas
 * a power of 2. Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
-#define QSPACE_MAX_2POW_DEFAULT 7
+#define LG_QSPACE_MAX_DEFAULT 7
 
 /*
 * Maximum size class that is a multiple of the cacheline, but not (necessarily)
 * a power of 2. Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
-#define CSPACE_MAX_2POW_DEFAULT 9
+#define LG_CSPACE_MAX_DEFAULT 9
 
 /*
 * Maximum medium size class. This must not be more than 1/4 of a chunk
- * (MEDIUM_MAX_2POW_DEFAULT <= CHUNK_2POW_DEFAULT - 2).
+ * (LG_MEDIUM_MAX_DEFAULT <= LG_CHUNK_DEFAULT - 2).
 */
-#define MEDIUM_MAX_2POW_DEFAULT 15
+#define LG_MEDIUM_MAX_DEFAULT 15
 
 /*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized
@@ -768,8 +763,8 @@ static pthread_key_t trace_tsd;
 #ifdef DYNAMIC_PAGE_SHIFT
 static size_t pagesize;
 static size_t pagesize_mask;
-static size_t pagesize_2pow;
-# define PAGE_SHIFT pagesize_2pow
+static size_t lg_pagesize;
+# define PAGE_SHIFT lg_pagesize
 # define PAGE_SIZE pagesize
 # define PAGE_MASK pagesize_mask
 #else
@@ -780,7 +775,7 @@ static size_t pagesize_2pow;
 
 /* Various bin-related settings. */
 #ifdef JEMALLOC_TINY /* Number of (2^n)-spaced tiny bins. */
-# define ntbins ((unsigned)(QUANTUM_2POW - TINY_MIN_2POW))
+# define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN))
 #else
 # define ntbins 0
 #endif
@@ -811,7 +806,7 @@ static size_t medium_max;
 */
 #define NMBINS_MAX 16
 /* Spacing between medium size classes. */
-static size_t mspace_2pow;
+static size_t lg_mspace;
 static size_t mspace_mask;
 
 static uint8_t const *small_size2bin;
@@ -831,7 +826,7 @@ static uint8_t const *small_size2bin;
 #define S2B_256(i) S2B_128(i) S2B_128(i)
 static const uint8_t const_small_size2bin[STATIC_PAGE_SIZE - 255] = {
 S2B_1(0xffU) /* 0 */
-#if (QUANTUM_2POW == 4)
+#if (LG_QUANTUM == 4)
 /* 64-bit system ************************/
 # ifdef JEMALLOC_TINY
 S2B_2(0) /* 2 */
@@ -1083,10 +1078,10 @@ static size_t opt_tcache_gc = true;
 #endif
 static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
 static bool opt_print_stats = false;
-static size_t opt_qspace_max_2pow = QSPACE_MAX_2POW_DEFAULT;
-static size_t opt_cspace_max_2pow = CSPACE_MAX_2POW_DEFAULT;
-static size_t opt_medium_max_2pow = MEDIUM_MAX_2POW_DEFAULT;
-static size_t opt_chunk_2pow = CHUNK_2POW_DEFAULT;
+static size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
+static size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
+static size_t opt_lg_medium_max = LG_MEDIUM_MAX_DEFAULT;
+static size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
 #ifdef JEMALLOC_TRACE
 static bool opt_trace = false;
 #endif
@@ -2370,7 +2365,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 /* Usable allocation found. */
 bit = ffs((int)mask) - 1;
 
-regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+regind = ((i << (LG_SIZEOF_INT + 3)) + bit);
 assert(regind < bin->nregs);
 ret = (void *)(((uintptr_t)run) + bin->reg0_offset
 + (bin->reg_size * regind));
@@ -2388,7 +2383,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
 /* Usable allocation found. */
 bit = ffs((int)mask) - 1;
 
-regind = ((i << (SIZEOF_INT_2POW + 3)) + bit);
+regind = ((i << (LG_SIZEOF_INT + 3)) + bit);
 assert(regind < bin->nregs);
 ret = (void *)(((uintptr_t)run) + bin->reg0_offset
 + (bin->reg_size * regind));
@@ -2470,10 +2465,10 @@ arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size)
 assert(diff == regind * size);
 assert(regind < bin->nregs);
 
-elm = regind >> (SIZEOF_INT_2POW + 3);
+elm = regind >> (LG_SIZEOF_INT + 3);
 if (elm < run->regs_minelm)
 run->regs_minelm = elm;
-bit = regind - (elm << (SIZEOF_INT_2POW + 3));
+bit = regind - (elm << (LG_SIZEOF_INT + 3));
 assert((run->regs_mask[elm] & (1U << bit)) == 0);
 run->regs_mask[elm] |= (1U << bit);
 }
@@ -2905,12 +2900,12 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 
 for (i = 0; i < bin->regs_mask_nelms - 1; i++)
 run->regs_mask[i] = UINT_MAX;
-remainder = bin->nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1);
+remainder = bin->nregs & ((1U << (LG_SIZEOF_INT + 3)) - 1);
 if (remainder == 0)
 run->regs_mask[i] = UINT_MAX;
 else {
 /* The last element has spare bits that need to be unset. */
-run->regs_mask[i] = (UINT_MAX >> ((1U << (SIZEOF_INT_2POW + 3))
+run->regs_mask[i] = (UINT_MAX >> ((1U << (LG_SIZEOF_INT + 3))
 - remainder));
 }
 
@@ -2997,8 +2992,8 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 + 1; /* Counter-act try_nregs-- in loop. */
 do {
 try_nregs--;
-try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
-((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ? 1 : 0);
+try_mask_nelms = (try_nregs >> (LG_SIZEOF_INT + 3)) +
+((try_nregs & ((1U << (LG_SIZEOF_INT + 3)) - 1)) ? 1 : 0);
 try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
 } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))
 > try_reg0_offset);
@@ -3019,8 +3014,8 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
 do {
 try_nregs--;
-try_mask_nelms = (try_nregs >> (SIZEOF_INT_2POW + 3)) +
-((try_nregs & ((1U << (SIZEOF_INT_2POW + 3)) - 1)) ?
+try_mask_nelms = (try_nregs >> (LG_SIZEOF_INT + 3)) +
+((try_nregs & ((1U << (LG_SIZEOF_INT + 3)) - 1)) ?
 1 : 0);
 try_reg0_offset = try_run_size - (try_nregs *
 bin->reg_size);
@@ -3032,7 +3027,7 @@ arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
 
 assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1))
 <= good_reg0_offset);
-assert((good_mask_nelms << (SIZEOF_INT_2POW + 3)) >= good_nregs);
+assert((good_mask_nelms << (LG_SIZEOF_INT + 3)) >= good_nregs);
 
 /* Copy final settings. */
 bin->run_size = good_run_size;
@@ -3152,7 +3147,7 @@ tcache_alloc(tcache_t *tcache, size_t size, bool zero)
 binind = small_size2bin[size];
 else {
 binind = mbin0 + ((MEDIUM_CEILING(size) - medium_min) >>
-mspace_2pow);
+lg_mspace);
 }
 assert(binind < nbins);
 tbin = tcache->tbins[binind];
@@ -3258,7 +3253,7 @@ arena_malloc_medium(arena_t *arena, size_t size, bool zero)
 size_t binind;
 
 size = MEDIUM_CEILING(size);
-binind = mbin0 + ((size - medium_min) >> mspace_2pow);
+binind = mbin0 + ((size - medium_min) >> lg_mspace);
 assert(binind < nbins);
 bin = &arena->bins[binind];
 assert(bin->reg_size == size);
@@ -4272,7 +4267,7 @@ arena_new(arena_t *arena, unsigned ind)
 bin->runcur = NULL;
 arena_run_tree_new(&bin->runs);
 
-bin->reg_size = (1U << (TINY_MIN_2POW + i));
+bin->reg_size = (1U << (LG_TINY_MIN + i));
 
 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
@@ -4288,7 +4283,7 @@ arena_new(arena_t *arena, unsigned ind)
 bin->runcur = NULL;
 arena_run_tree_new(&bin->runs);
 
-bin->reg_size = (i - ntbins + 1) << QUANTUM_2POW;
+bin->reg_size = (i - ntbins + 1) << LG_QUANTUM;
 
 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
@@ -4304,7 +4299,7 @@ arena_new(arena_t *arena, unsigned ind)
 arena_run_tree_new(&bin->runs);
 
 bin->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
-CACHELINE_2POW);
+LG_CACHELINE);
 
 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
@@ -4320,7 +4315,7 @@ arena_new(arena_t *arena, unsigned ind)
 arena_run_tree_new(&bin->runs);
 
 bin->reg_size = sspace_min + ((i - (ntbins + nqbins + ncbins))
-<< SUBPAGE_2POW);
+<< LG_SUBPAGE);
 
 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
@@ -4336,7 +4331,7 @@ arena_new(arena_t *arena, unsigned ind)
 arena_run_tree_new(&bin->runs);
 
 bin->reg_size = medium_min + ((i - (ntbins + nqbins + ncbins +
-nsbins)) << mspace_2pow);
+nsbins)) << lg_mspace);
 
 prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
 
@@ -5064,10 +5059,10 @@ malloc_print_stats(void)
 malloc_message("Cacheline size (assumed): ",
 umax2s(CACHELINE, 10, s), "\n", "");
 malloc_message("Subpage spacing: ", umax2s(SUBPAGE, 10, s), "\n", "");
-malloc_message("Medium spacing: ", umax2s((1U << mspace_2pow), 10, s),
+malloc_message("Medium spacing: ", umax2s((1U << lg_mspace), 10, s),
 "\n", "");
 #ifdef JEMALLOC_TINY
-malloc_message("Tiny 2^n-spaced sizes: [", umax2s((1U << TINY_MIN_2POW),
+malloc_message("Tiny 2^n-spaced sizes: [", umax2s((1U << LG_TINY_MIN),
 10, s), "..", "");
 malloc_message(umax2s((qspace_min >> 1), 10, s), "]\n", "", "");
 #endif
@@ -5086,7 +5081,7 @@ malloc_print_stats(void)
 umax2s(opt_dirty_max, 10, s), "\n", "");
 
 malloc_message("Chunk size: ", umax2s(chunksize, 10, s), "", "");
-malloc_message(" (2^", umax2s(opt_chunk_2pow, 10, s), ")\n", "");
+malloc_message(" (2^", umax2s(opt_lg_chunk, 10, s), ")\n", "");
 
 #ifdef JEMALLOC_STATS
 {
@@ -5165,35 +5160,35 @@ small_size2bin_validate(void)
 i = 1;
 # ifdef JEMALLOC_TINY
 /* Tiny. */
-for (; i < (1U << TINY_MIN_2POW); i++) {
-size = pow2_ceil(1U << TINY_MIN_2POW);
-binind = ffs((int)(size >> (TINY_MIN_2POW + 1)));
+for (; i < (1U << LG_TINY_MIN); i++) {
+size = pow2_ceil(1U << LG_TINY_MIN);
+binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
 assert(small_size2bin[i] == binind);
 }
 for (; i < qspace_min; i++) {
 size = pow2_ceil(i);
-binind = ffs((int)(size >> (TINY_MIN_2POW + 1)));
+binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
 assert(small_size2bin[i] == binind);
 }
 # endif
 /* Quantum-spaced. */
 for (; i <= qspace_max; i++) {
 size = QUANTUM_CEILING(i);
-binind = ntbins + (size >> QUANTUM_2POW) - 1;
+binind = ntbins + (size >> LG_QUANTUM) - 1;
 assert(small_size2bin[i] == binind);
 }
 /* Cacheline-spaced. */
 for (; i <= cspace_max; i++) {
 size = CACHELINE_CEILING(i);
 binind = ntbins + nqbins + ((size - cspace_min) >>
-CACHELINE_2POW);
+LG_CACHELINE);
 assert(small_size2bin[i] == binind);
 }
 /* Sub-page. */
 for (; i <= sspace_max; i++) {
 size = SUBPAGE_CEILING(i);
 binind = ntbins + nqbins + ncbins + ((size - sspace_min)
->> SUBPAGE_2POW);
+>> LG_SUBPAGE);
 assert(small_size2bin[i] == binind);
 }
 }
@@ -5203,8 +5198,8 @@ static bool
 small_size2bin_init(void)
 {
 
-if (opt_qspace_max_2pow != QSPACE_MAX_2POW_DEFAULT
-|| opt_cspace_max_2pow != CSPACE_MAX_2POW_DEFAULT
+if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
+|| opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
 || sizeof(const_small_size2bin) != small_maxclass + 1)
 return (small_size2bin_init_hard());
 
@@ -5222,8 +5217,8 @@ small_size2bin_init_hard(void)
 size_t i, size, binind;
 uint8_t *custom_small_size2bin;
 
-assert(opt_qspace_max_2pow != QSPACE_MAX_2POW_DEFAULT
-|| opt_cspace_max_2pow != CSPACE_MAX_2POW_DEFAULT
+assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
+|| opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
 || sizeof(const_small_size2bin) != small_maxclass + 1);
 
 custom_small_size2bin = (uint8_t *)base_alloc(small_maxclass + 1);
@@ -5234,35 +5229,35 @@ small_size2bin_init_hard(void)
 i = 1;
 #ifdef JEMALLOC_TINY
 /* Tiny. */
-for (; i < (1U << TINY_MIN_2POW); i++) {
-size = pow2_ceil(1U << TINY_MIN_2POW);
-binind = ffs((int)(size >> (TINY_MIN_2POW + 1)));
+for (; i < (1U << LG_TINY_MIN); i++) {
+size = pow2_ceil(1U << LG_TINY_MIN);
+binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
 custom_small_size2bin[i] = binind;
 }
 for (; i < qspace_min; i++) {
 size = pow2_ceil(i);
-binind = ffs((int)(size >> (TINY_MIN_2POW + 1)));
+binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
 custom_small_size2bin[i] = binind;
 }
 #endif
 /* Quantum-spaced. */
 for (; i <= qspace_max; i++) {
 size = QUANTUM_CEILING(i);
-binind = ntbins + (size >> QUANTUM_2POW) - 1;
+binind = ntbins + (size >> LG_QUANTUM) - 1;
 custom_small_size2bin[i] = binind;
 }
 /* Cacheline-spaced. */
 for (; i <= cspace_max; i++) {
 size = CACHELINE_CEILING(i);
 binind = ntbins + nqbins + ((size - cspace_min) >>
-CACHELINE_2POW);
+LG_CACHELINE);
 custom_small_size2bin[i] = binind;
 }
 /* Sub-page. */
 for (; i <= sspace_max; i++) {
 size = SUBPAGE_CEILING(i);
 binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
-SUBPAGE_2POW);
+LG_SUBPAGE);
 custom_small_size2bin[i] = binind;
 }
 
@@ -5344,11 +5339,11 @@ malloc_init_hard(void)
 
 /*
 * We assume that pagesize is a power of 2 when calculating
- * pagesize_mask and pagesize_2pow.
+ * pagesize_mask and lg_pagesize.
 */
 assert(((result - 1) & result) == 0);
 pagesize_mask = result - 1;
-pagesize_2pow = ffs((int)result) - 1;
+lg_pagesize = ffs((int)result) - 1;
 }
 #endif
 
@@ -5435,16 +5430,16 @@ MALLOC_OUT:
 opt_abort = true;
 break;
 case 'c':
-if (opt_cspace_max_2pow - 1 >
-opt_qspace_max_2pow &&
-opt_cspace_max_2pow >
-CACHELINE_2POW)
-opt_cspace_max_2pow--;
+if (opt_lg_cspace_max - 1 >
+opt_lg_qspace_max &&
+opt_lg_cspace_max >
+LG_CACHELINE)
+opt_lg_cspace_max--;
 break;
 case 'C':
-if (opt_cspace_max_2pow < PAGE_SHIFT
+if (opt_lg_cspace_max < PAGE_SHIFT
 - 1)
-opt_cspace_max_2pow++;
+opt_lg_cspace_max++;
 break;
 case 'f':
 opt_dirty_max >>= 1;
@@ -5485,24 +5480,24 @@ MALLOC_OUT:
 * size class (one page more than the
 * size).
 */
-if ((1U << (opt_chunk_2pow - 1)) >=
+if ((1U << (opt_lg_chunk - 1)) >=
 (2U << PAGE_SHIFT) + (1U <<
-opt_medium_max_2pow))
-opt_chunk_2pow--;
+opt_lg_medium_max))
+opt_lg_chunk--;
 break;
 case 'K':
-if (opt_chunk_2pow + 1 <
+if (opt_lg_chunk + 1 <
 (sizeof(size_t) << 3))
-opt_chunk_2pow++;
+opt_lg_chunk++;
 break;
 case 'm':
-if (opt_medium_max_2pow > PAGE_SHIFT)
-opt_medium_max_2pow--;
+if (opt_lg_medium_max > PAGE_SHIFT)
+opt_lg_medium_max--;
 break;
 case 'M':
-if (opt_medium_max_2pow + 1 <
-opt_chunk_2pow)
-opt_medium_max_2pow++;
+if (opt_lg_medium_max + 1 <
+opt_lg_chunk)
+opt_lg_medium_max++;
 break;
 case 'n':
 opt_narenas_lshift--;
@@ -5517,13 +5512,13 @@ MALLOC_OUT:
 opt_print_stats = true;
 break;
 case 'q':
-if (opt_qspace_max_2pow > QUANTUM_2POW)
-opt_qspace_max_2pow--;
+if (opt_lg_qspace_max > LG_QUANTUM)
+opt_lg_qspace_max--;
 break;
 case 'Q':
-if (opt_qspace_max_2pow + 1 <
-opt_cspace_max_2pow)
-opt_qspace_max_2pow++;
+if (opt_lg_qspace_max + 1 <
+opt_lg_cspace_max)
+opt_lg_qspace_max++;
 break;
 #ifdef JEMALLOC_TRACE
 case 't':
@@ -5593,26 +5588,26 @@ MALLOC_OUT:
 /* Register fork handlers. */
 pthread_atfork(jemalloc_prefork, jemalloc_postfork, jemalloc_postfork);
 
-/* Set variables according to the value of opt_[qc]space_max_2pow. */
-qspace_max = (1U << opt_qspace_max_2pow);
+/* Set variables according to the value of opt_lg_[qc]space_max. */
+qspace_max = (1U << opt_lg_qspace_max);
 cspace_min = CACHELINE_CEILING(qspace_max);
 if (cspace_min == qspace_max)
 cspace_min += CACHELINE;
-cspace_max = (1U << opt_cspace_max_2pow);
+cspace_max = (1U << opt_lg_cspace_max);
 sspace_min = SUBPAGE_CEILING(cspace_max);
 if (sspace_min == cspace_max)
 sspace_min += SUBPAGE;
 assert(sspace_min < PAGE_SIZE);
 sspace_max = PAGE_SIZE - SUBPAGE;
-medium_max = (1U << opt_medium_max_2pow);
+medium_max = (1U << opt_lg_medium_max);
 
 #ifdef JEMALLOC_TINY
-assert(QUANTUM_2POW >= TINY_MIN_2POW);
+assert(LG_QUANTUM >= LG_TINY_MIN);
 #endif
-assert(ntbins <= QUANTUM_2POW);
-nqbins = qspace_max >> QUANTUM_2POW;
-ncbins = ((cspace_max - cspace_min) >> CACHELINE_2POW) + 1;
-nsbins = ((sspace_max - sspace_min) >> SUBPAGE_2POW) + 1;
+assert(ntbins <= LG_QUANTUM);
+nqbins = qspace_max >> LG_QUANTUM;
+ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
+nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
 
 /*
 * Compute medium size class spacing and the number of medium size
@@ -5620,13 +5615,13 @@ MALLOC_OUT:
 * use the smallest spacing that does not exceed NMBINS_MAX medium size
 * classes.
 */
-mspace_2pow = SUBPAGE_2POW;
-nmbins = ((medium_max - medium_min) >> mspace_2pow) + 1;
-while (mspace_2pow < PAGE_SHIFT && nmbins > NMBINS_MAX) {
-mspace_2pow = mspace_2pow + 1;
-nmbins = ((medium_max - medium_min) >> mspace_2pow) + 1;
+lg_mspace = LG_SUBPAGE;
+nmbins = ((medium_max - medium_min) >> lg_mspace) + 1;
+while (lg_mspace < PAGE_SHIFT && nmbins > NMBINS_MAX) {
+lg_mspace = lg_mspace + 1;
+nmbins = ((medium_max - medium_min) >> lg_mspace) + 1;
 }
-mspace_mask = (1U << mspace_2pow) - 1U;
+mspace_mask = (1U << lg_mspace) - 1U;
 
 mbin0 = ntbins + nqbins + ncbins + nsbins;
 nbins = mbin0 + nmbins;
@@ -5656,8 +5651,8 @@ MALLOC_OUT:
 ((TCACHE_GC_THRESHOLD % nbins == 0) ? 0 : 1);
 #endif
 
-/* Set variables according to the value of opt_chunk_2pow. */
-chunksize = (1LU << opt_chunk_2pow);
+/* Set variables according to the value of opt_lg_chunk. */
+chunksize = (1LU << opt_lg_chunk);
 chunksize_mask = chunksize - 1;
 chunk_npages = (chunksize >> PAGE_SHIFT);
 {
@@ -5830,7 +5825,7 @@ MALLOC_OUT:
 * spread allocations evenly among the arenas.
 */
 assert((narenas & 1) == 0); /* narenas must be even. */
-nprimes = (sizeof(primes) >> SIZEOF_INT_2POW);
+nprimes = (sizeof(primes) >> LG_SIZEOF_INT);
 parenas = primes[nprimes - 1]; /* In case not enough primes. */
 for (i = 1; i < nprimes; i++) {
 if (primes[i] > narenas) {
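
Note: several hunks above shift by LG_SIZEOF_INT + 3, which is log2 of the number of bits in an int (sizeof(int) bytes times 8), to split a region index into an element index and a bit position within a run's regs_mask bitmap, as in arena_run_reg_alloc() and arena_run_reg_dalloc(). A small self-contained sketch of that arithmetic (names and values here are illustrative, not the allocator's own):

#include <stdio.h>

#define LG_SIZEOF_INT	2			/* hypothetical: 4-byte ints */
#define LG_BITS_PER_INT	(LG_SIZEOF_INT + 3)	/* log2(4 * 8) == 5 */

int
main(void)
{
	unsigned regind = 45;	/* some region index within a run */
	unsigned elm, bit;

	/* Which mask word, and which bit within that word. */
	elm = regind >> LG_BITS_PER_INT;
	bit = regind - (elm << LG_BITS_PER_INT);

	/* Prints "regind 45 -> regs_mask[1], bit 13". */
	printf("regind %u -> regs_mask[%u], bit %u\n", regind, elm, bit);
	return (0);
}
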
@@ -125,7 +125,10 @@
 /* TLS is used to map arenas and magazine caches to threads. */
 #undef NO_TLS
 
-/* sizeof(void *) == 2^SIZEOF_PTR_2POW. */
-#undef SIZEOF_PTR_2POW
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#undef LG_SIZEOF_PTR
 
+/* sizeof(int) == 2^LG_SIZEOF_INT. */
+#undef LG_SIZEOF_INT
+
 #endif /* JEMALLOC_DEFS_H_ */