Clean up *PAGE* macros.
s/PAGE_SHIFT/LG_PAGE/g and s/PAGE_SIZE/PAGE/g. Remove remnants of the dynamic-page-shift code. Rename the "arenas.pagesize" mallctl to "arenas.page". Remove the "arenas.chunksize" mallctl, which is redundant with "opt.lg_chunk".
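For orientation, here is a minimal standalone sketch (not part of the diff) of how the consolidated macros fit together after this change, assuming the configure script detected a 4 KiB page (STATIC_PAGE_SHIFT == 12); the PAGE_CEILING body shown here is assumed from the same rounding idiom as CACHELINE_CEILING, since the diff truncates its definition:

```c
#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for the configure-detected value. */
#define STATIC_PAGE_SHIFT 12

/* The new macro set introduced by this commit. */
#define LG_PAGE   STATIC_PAGE_SHIFT
#define PAGE      ((size_t)(1U << STATIC_PAGE_SHIFT))
#define PAGE_MASK ((size_t)(PAGE - 1))

/* Smallest page multiple >= s (assumed body; same idiom as CACHELINE_CEILING). */
#define PAGE_CEILING(s) (((s) + PAGE_MASK) & ~PAGE_MASK)

int
main(void)
{
	assert(PAGE == 4096);
	assert(PAGE_CEILING(1) == 4096);     /* Round up to one page. */
	assert(PAGE_CEILING(4096) == 4096);  /* Exact multiples unchanged. */
	assert((4097 >> LG_PAGE) == 1);      /* Shift replaces division. */
	return (0);
}
```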
@@ -130,10 +130,10 @@ struct arena_chunk_map_s {
 	 *     xxxxxxxx xxxxxxxx xxxx---- ----xxxx
 	 *     -------- -------- -------- ----D-LA
 	 *
-	 *   Large (sampled, size <= PAGE_SIZE):
+	 *   Large (sampled, size <= PAGE):
 	 *     ssssssss ssssssss sssscccc ccccD-LA
 	 *
-	 *   Large (not sampled, size == PAGE_SIZE):
+	 *   Large (not sampled, size == PAGE):
 	 *     ssssssss ssssssss ssss---- ----D-LA
 	 */
 	size_t bits;
@@ -486,7 +486,7 @@ arena_prof_ctx_get(const void *ptr)
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	mapbits = chunk->map[pageind-map_bias].bits;
 	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
@@ -494,8 +494,8 @@ arena_prof_ctx_get(const void *ptr)
 			ret = (prof_ctx_t *)(uintptr_t)1U;
 		else {
 			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-			    (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
-			    PAGE_SHIFT));
+			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+			    LG_PAGE));
 			size_t binind = arena_bin_index(chunk->arena, run->bin);
 			arena_bin_info_t *bin_info = &arena_bin_info[binind];
 			unsigned regind;
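The expression being renamed here encodes a useful invariant: for a page inside a small run, the size bits of its map entry (`mapbits >> LG_PAGE`) hold that page's offset, in pages, from the start of the run, so subtracting the offset from `pageind` recovers the run header. A standalone sketch of just that arithmetic (constants assumed for illustration, not from the diff):

```c
#include <assert.h>
#include <stdint.h>

#define LG_PAGE 12  /* Assumed page shift for illustration. */

/*
 * Given a page index within a chunk and that page's map bits (whose
 * upper bits hold the page's offset, in pages, from the start of its
 * run), compute the chunk-relative byte offset of the run header.
 */
static uintptr_t
run_offset(uintptr_t pageind, uintptr_t mapbits)
{
	return ((pageind - (mapbits >> LG_PAGE)) << LG_PAGE);
}

int
main(void)
{
	/* Page 7 of the chunk, 3 pages into its run => run starts at page 4. */
	uintptr_t mapbits = (uintptr_t)3 << LG_PAGE;
	assert(run_offset(7, mapbits) == (uintptr_t)4 << LG_PAGE);
	return (0);
}
```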
@@ -522,14 +522,14 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	mapbits = chunk->map[pageind-map_bias].bits;
 	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 		if (prof_promote == false) {
 			arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-			    (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
-			    PAGE_SHIFT));
+			    (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+			    LG_PAGE));
 			arena_bin_t *bin = run->bin;
 			size_t binind;
 			arena_bin_info_t *bin_info;
@@ -598,7 +598,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	mapelm = &chunk->map[pageind-map_bias];
 	assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
@@ -610,8 +610,8 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 		arena_bin_t *bin;
 
 		run = (arena_run_t *)((uintptr_t)chunk +
-		    (uintptr_t)((pageind - (mapelm->bits >>
-		    PAGE_SHIFT)) << PAGE_SHIFT));
+		    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+		    LG_PAGE));
 		bin = run->bin;
 		if (config_debug) {
 			size_t binind = arena_bin_index(arena, bin);
@@ -58,13 +58,6 @@ static const bool config_dss =
     false
 #endif
     ;
-static const bool config_dynamic_page_shift =
-#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_fill =
 #ifdef JEMALLOC_FILL
     true
@@ -266,20 +259,12 @@ static const bool config_ivsalloc =
     (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
 
 /* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
-#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
-#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
-#ifdef PAGE_SHIFT
-#  undef PAGE_SHIFT
-#endif
-#ifdef PAGE_SIZE
-#  undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#  undef PAGE_MASK
-#endif
-#define PAGE_SHIFT	STATIC_PAGE_SHIFT
-#define PAGE_SIZE	STATIC_PAGE_SIZE
-#define PAGE_MASK	STATIC_PAGE_MASK
+#define LG_PAGE		STATIC_PAGE_SHIFT
+#define PAGE		((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE_MASK	((size_t)(PAGE - 1))
 
 /* Return the smallest pagesize multiple that is >= s. */
 #define PAGE_CEILING(s) \
@@ -351,12 +336,6 @@ extern bool opt_xmalloc;
 extern bool opt_zero;
 extern size_t opt_narenas;
 
-#ifdef DYNAMIC_PAGE_SHIFT
-extern size_t pagesize;
-extern size_t pagesize_mask;
-extern size_t lg_pagesize;
-#endif
-
 /* Number of CPUs. */
 extern unsigned ncpus;
 
@@ -479,7 +458,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
 		return (0);
 	}
 
-	if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
+	if (usize <= arena_maxclass && alignment <= PAGE) {
 		if (usize <= SMALL_MAXCLASS)
 			return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
 		return (PAGE_CEILING(usize));
@@ -494,7 +473,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
 	usize = PAGE_CEILING(size);
 	/*
 	 * (usize < size) protects against very large sizes within
-	 * PAGE_SIZE of SIZE_T_MAX.
+	 * PAGE of SIZE_T_MAX.
 	 *
 	 * (usize + alignment < usize) protects against the
 	 * combination of maximal alignment and usize large enough
@@ -514,18 +493,18 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
 	 * would need to allocate in order to guarantee the alignment.
 	 */
 	if (usize >= alignment)
-		run_size = usize + alignment - PAGE_SIZE;
+		run_size = usize + alignment - PAGE;
 	else {
 		/*
 		 * It is possible that (alignment << 1) will cause
 		 * overflow, but it doesn't matter because we also
-		 * subtract PAGE_SIZE, which in the case of overflow
-		 * leaves us with a very large run_size.  That causes
-		 * the first conditional below to fail, which means
-		 * that the bogus run_size value never gets used for
+		 * subtract PAGE, which in the case of overflow leaves
+		 * us with a very large run_size.  That causes the
+		 * first conditional below to fail, which means that
+		 * the bogus run_size value never gets used for
 		 * anything important.
 		 */
-		run_size = (alignment << 1) - PAGE_SIZE;
+		run_size = (alignment << 1) - PAGE;
 	}
 	if (run_size_p != NULL)
 		*run_size_p = run_size;
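The `usize + alignment - PAGE` bound relies on runs being page-aligned: at most `alignment - PAGE` bytes can precede the first suitably aligned address inside the run. A small self-check under assumed values (LG_PAGE and the sizes here are illustrative, not from the diff):

```c
#include <assert.h>
#include <stddef.h>

#define LG_PAGE 12  /* Assumed for illustration. */
#define PAGE    ((size_t)1 << LG_PAGE)

/*
 * A page-aligned run of (usize + alignment - PAGE) bytes always
 * contains an alignment-aligned region of usize bytes: the run base
 * is PAGE-aligned, so at most (alignment - PAGE) bytes are skipped
 * before the first alignment-aligned address.
 */
int
main(void)
{
	size_t usize = 8 * PAGE, alignment = 4 * PAGE;
	size_t run_size = usize + alignment - PAGE;

	/* Worst-case run base: one page past an aligned boundary. */
	size_t base = alignment + PAGE;
	size_t first_aligned = (base + alignment - 1) & ~(alignment - 1);
	size_t skipped = first_aligned - base;  /* == alignment - PAGE */

	assert(skipped == alignment - PAGE);
	assert(run_size - skipped >= usize);    /* usize bytes still fit. */
	return (0);
}
```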
@@ -600,7 +579,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
 	assert(usize != 0);
 	assert(usize == sa2u(usize, alignment, NULL));
 
-	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
+	if (usize <= arena_maxclass && alignment <= PAGE)
 		ret = arena_malloc(usize, zero);
 	else {
 		size_t run_size JEMALLOC_CC_SILENCE_INIT(0);
@@ -36,7 +36,7 @@ while [ ${lg_q} -le ${lg_qmax} ] ; do
   lg_p=${lg_pmin}
   while [ ${lg_p} -le ${lg_pmax} ] ; do
     cat <<EOF
-#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && PAGE_SHIFT == ${lg_p})
+#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})
 #define SIZE_CLASSES_DEFINED
 EOF
     pow2 ${lg_q}; q=${pow2_result}
@@ -369,7 +369,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 
 	size = PAGE_CEILING(size);
 	assert(size <= tcache_maxclass);
-	binind = NBINS + (size >> PAGE_SHIFT) - 1;
+	binind = NBINS + (size >> LG_PAGE) - 1;
 	assert(binind < nhbins);
 	tbin = &tcache->tbins[binind];
 	ret = tcache_alloc_easy(tbin);
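Since `size` has already been rounded by PAGE_CEILING, `size >> LG_PAGE` is a page count, and bin NBINS is the one-page bin; large bins simply follow the small bins contiguously. A sketch with assumed constants (NBINS here is hypothetical, not from the diff):

```c
#include <assert.h>
#include <stddef.h>

#define LG_PAGE 12  /* Assumed page shift. */
#define NBINS   28  /* Hypothetical small-bin count. */

/* Map a page-multiple size to its tcache large-bin index. */
static size_t
large_binind(size_t size)
{
	return (NBINS + (size >> LG_PAGE) - 1);
}

int
main(void)
{
	assert(large_binind(1 << LG_PAGE) == NBINS);      /* 1 page. */
	assert(large_binind(2 << LG_PAGE) == NBINS + 1);  /* 2 pages. */
	return (0);
}
```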
@@ -386,7 +386,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 			arena_chunk_t *chunk =
 			    (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
 			size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
-			    PAGE_SHIFT);
+			    LG_PAGE);
 			chunk->map[pageind-map_bias].bits &=
 			    ~CHUNK_MAP_CLASS_MASK;
 		}
@@ -426,10 +426,10 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	arena = chunk->arena;
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	mapelm = &chunk->map[pageind-map_bias];
 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
-	    (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
+	    (mapelm->bits >> LG_PAGE)) << LG_PAGE));
 	bin = run->bin;
 	binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
 	    sizeof(arena_bin_t);
@@ -462,7 +462,7 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 	assert(arena_salloc(ptr) > SMALL_MAXCLASS);
 	assert(arena_salloc(ptr) <= tcache_maxclass);
 
-	binind = NBINS + (size >> PAGE_SHIFT) - 1;
+	binind = NBINS + (size >> LG_PAGE) - 1;
 
 	if (config_fill && opt_junk)
 		memset(ptr, 0x5a, size);