Clean up *PAGE* macros.
s/PAGE_SHIFT/LG_PAGE/g and s/PAGE_SIZE/PAGE/g. Remove remnants of the dynamic-page-shift code. Rename the "arenas.pagesize" mallctl to "arenas.page". Remove the "arenas.chunksize" mallctl, which is redundant with "opt.lg_chunk".
commit ae4c7b4b40
parent f004737267
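With this change the page size is exposed through the renamed "arenas.page" mallctl, and the chunk size no longer has its own mallctl; it follows from "opt.lg_chunk". A minimal sketch of reading both through the mallctl interface (illustrative only; assumes the standard unprefixed API names from <jemalloc/jemalloc.h>):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    size_t page, lg_chunk, sz = sizeof(size_t);

    /* Renamed from "arenas.pagesize" by this commit. */
    if (mallctl("arenas.page", &page, &sz, NULL, 0) != 0)
        return (1);
    /* "arenas.chunksize" is gone; derive the chunk size from opt.lg_chunk. */
    if (mallctl("opt.lg_chunk", &lg_chunk, &sz, NULL, 0) != 0)
        return (1);
    printf("page size: %zu, chunk size: %zu\n", page, (size_t)1 << lg_chunk);
    return (0);
}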
@@ -1167,22 +1167,13 @@ malloc_conf = "xmalloc:true";]]></programlisting>

       <varlistentry>
         <term>
-          <mallctl>arenas.pagesize</mallctl>
+          <mallctl>arenas.page</mallctl>
           (<type>size_t</type>)
           <literal>r-</literal>
         </term>
         <listitem><para>Page size.</para></listitem>
       </varlistentry>

-      <varlistentry>
-        <term>
-          <mallctl>arenas.chunksize</mallctl>
-          (<type>size_t</type>)
-          <literal>r-</literal>
-        </term>
-        <listitem><para>Chunk size.</para></listitem>
-      </varlistentry>
-
       <varlistentry>
         <term>
           <mallctl>arenas.tcache_max</mallctl>
@@ -130,10 +130,10 @@ struct arena_chunk_map_s {
  *   xxxxxxxx xxxxxxxx xxxx---- ----xxxx
  *   -------- -------- -------- ----D-LA
  *
- * Large (sampled, size <= PAGE_SIZE):
+ * Large (sampled, size <= PAGE):
  *   ssssssss ssssssss sssscccc ccccD-LA
  *
- * Large (not sampled, size == PAGE_SIZE):
+ * Large (not sampled, size == PAGE):
  *   ssssssss ssssssss ssss---- ----D-LA
  */
 size_t bits;
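The layout described in this comment is what the pointer arithmetic below relies on: for a large run the size sits in the bits above the flag area, for a small run those bits hold the page offset back to the run header, and the low bits carry the dirty/large/allocated flags. A rough illustration in terms of the macros used throughout this patch (these helper names are illustrative and not part of the patch):

/* Illustrative only: decoding one arena_chunk_map_t "bits" word. */
static inline size_t
mapbits_large_size(size_t mapbits)
{
    /* Large run: the size is a page multiple stored above the flag bits. */
    return (mapbits & ~PAGE_MASK);
}

static inline size_t
mapbits_small_run_pageind(size_t pageind, size_t mapbits)
{
    /* Small run: the high bits give the page offset back to the run start. */
    return (pageind - (mapbits >> LG_PAGE));
}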
@@ -486,7 +486,7 @@ arena_prof_ctx_get(const void *ptr)
     assert(CHUNK_ADDR2BASE(ptr) != ptr);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     mapbits = chunk->map[pageind-map_bias].bits;
     assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapbits & CHUNK_MAP_LARGE) == 0) {
@@ -494,8 +494,8 @@ arena_prof_ctx_get(const void *ptr)
         ret = (prof_ctx_t *)(uintptr_t)1U;
     else {
         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
-            PAGE_SHIFT));
+            (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+            LG_PAGE));
         size_t binind = arena_bin_index(chunk->arena, run->bin);
         arena_bin_info_t *bin_info = &arena_bin_info[binind];
         unsigned regind;
@@ -522,14 +522,14 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
     assert(CHUNK_ADDR2BASE(ptr) != ptr);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     mapbits = chunk->map[pageind-map_bias].bits;
     assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapbits & CHUNK_MAP_LARGE) == 0) {
         if (prof_promote == false) {
             arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-                (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
-                PAGE_SHIFT));
+                (uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
+                LG_PAGE));
             arena_bin_t *bin = run->bin;
             size_t binind;
             arena_bin_info_t *bin_info;
@@ -598,7 +598,7 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);

-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     mapelm = &chunk->map[pageind-map_bias];
     assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
@@ -610,8 +610,8 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
         arena_bin_t *bin;

         run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapelm->bits >>
-            PAGE_SHIFT)) << PAGE_SHIFT));
+            (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+            LG_PAGE));
         bin = run->bin;
         if (config_debug) {
             size_t binind = arena_bin_index(arena, bin);
@@ -58,13 +58,6 @@ static const bool config_dss =
     false
 #endif
     ;
-static const bool config_dynamic_page_shift =
-#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
-    true
-#else
-    false
-#endif
-    ;
 static const bool config_fill =
 #ifdef JEMALLOC_FILL
     true
@@ -266,20 +259,12 @@ static const bool config_ivsalloc =
     (((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

 /* Page size.  STATIC_PAGE_SHIFT is determined by the configure script. */
-#define STATIC_PAGE_SIZE ((size_t)(1U << STATIC_PAGE_SHIFT))
-#define STATIC_PAGE_MASK ((size_t)(STATIC_PAGE_SIZE - 1))
-#ifdef PAGE_SHIFT
-#  undef PAGE_SHIFT
-#endif
-#ifdef PAGE_SIZE
-#  undef PAGE_SIZE
-#endif
-#ifdef PAGE_MASK
-#  undef PAGE_MASK
-#endif
-#define PAGE_SHIFT STATIC_PAGE_SHIFT
-#define PAGE_SIZE STATIC_PAGE_SIZE
-#define PAGE_MASK STATIC_PAGE_MASK
+#define LG_PAGE    STATIC_PAGE_SHIFT
+#define PAGE       ((size_t)(1U << STATIC_PAGE_SHIFT))
+#define PAGE_MASK  ((size_t)(PAGE - 1))

 /* Return the smallest pagesize multiple that is >= s. */
 #define PAGE_CEILING(s) \
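The replacement macros keep the same relationships as the old STATIC_PAGE_* spellings, only under shorter names. A quick sketch of how they fit together, assuming (purely as an example) a configure-detected STATIC_PAGE_SHIFT of 12, i.e. 4 KiB pages:

/* Example values only; STATIC_PAGE_SHIFT comes from the configure script. */
#define LG_PAGE   12                         /* STATIC_PAGE_SHIFT */
#define PAGE      ((size_t)(1U << LG_PAGE))  /* 4096 */
#define PAGE_MASK ((size_t)(PAGE - 1))       /* 0xfff */

/* PAGE_CEILING(s) rounds s up to a page multiple, mirroring CACHELINE_CEILING:
 * PAGE_CEILING(1) == 4096, PAGE_CEILING(4096) == 4096, PAGE_CEILING(5000) == 8192.
 */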
@@ -351,12 +336,6 @@ extern bool opt_xmalloc;
 extern bool opt_zero;
 extern size_t opt_narenas;

-#ifdef DYNAMIC_PAGE_SHIFT
-extern size_t pagesize;
-extern size_t pagesize_mask;
-extern size_t lg_pagesize;
-#endif
-
 /* Number of CPUs. */
 extern unsigned ncpus;
@@ -479,7 +458,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
         return (0);
     }

-    if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
+    if (usize <= arena_maxclass && alignment <= PAGE) {
         if (usize <= SMALL_MAXCLASS)
             return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size);
         return (PAGE_CEILING(usize));
@@ -494,7 +473,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
         usize = PAGE_CEILING(size);
         /*
          * (usize < size) protects against very large sizes within
-         * PAGE_SIZE of SIZE_T_MAX.
+         * PAGE of SIZE_T_MAX.
          *
          * (usize + alignment < usize) protects against the
          * combination of maximal alignment and usize large enough
@@ -514,18 +493,18 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
          * would need to allocate in order to guarantee the alignment.
          */
         if (usize >= alignment)
-            run_size = usize + alignment - PAGE_SIZE;
+            run_size = usize + alignment - PAGE;
         else {
             /*
              * It is possible that (alignment << 1) will cause
              * overflow, but it doesn't matter because we also
-             * subtract PAGE_SIZE, which in the case of overflow
-             * leaves us with a very large run_size.  That causes
-             * the first conditional below to fail, which means
-             * that the bogus run_size value never gets used for
+             * subtract PAGE, which in the case of overflow leaves
+             * us with a very large run_size.  That causes the
+             * first conditional below to fail, which means that
+             * the bogus run_size value never gets used for
              * anything important.
              */
-            run_size = (alignment << 1) - PAGE_SIZE;
+            run_size = (alignment << 1) - PAGE;
         }
         if (run_size_p != NULL)
             *run_size_p = run_size;
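To make the worst case concrete: with 4 KiB pages, usize = 16384 and alignment = 8192 give run_size = 16384 + 8192 - 4096 = 20480 bytes (five pages). The run itself starts page-aligned, so the first 8192-aligned address inside it is at most alignment - PAGE = 4096 bytes in, which still leaves the full 16384 usable bytes; that is exactly the bound the usize >= alignment branch computes.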
@@ -600,7 +579,7 @@ ipalloc(size_t usize, size_t alignment, bool zero)
     assert(usize != 0);
     assert(usize == sa2u(usize, alignment, NULL));

-    if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
+    if (usize <= arena_maxclass && alignment <= PAGE)
         ret = arena_malloc(usize, zero);
     else {
         size_t run_size JEMALLOC_CC_SILENCE_INIT(0);
@@ -36,7 +36,7 @@ while [ ${lg_q} -le ${lg_qmax} ] ; do
   lg_p=${lg_pmin}
   while [ ${lg_p} -le ${lg_pmax} ] ; do
     cat <<EOF
-#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && PAGE_SHIFT == ${lg_p})
+#if (LG_TINY_MIN == ${lg_t} && LG_QUANTUM == ${lg_q} && LG_PAGE == ${lg_p})
 #define SIZE_CLASSES_DEFINED
 EOF
     pow2 ${lg_q}; q=${pow2_result}
@@ -369,7 +369,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)

     size = PAGE_CEILING(size);
     assert(size <= tcache_maxclass);
-    binind = NBINS + (size >> PAGE_SHIFT) - 1;
+    binind = NBINS + (size >> LG_PAGE) - 1;
     assert(binind < nhbins);
     tbin = &tcache->tbins[binind];
     ret = tcache_alloc_easy(tbin);
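For reference, this formula places the large size classes directly after the small bins: with 4 KiB pages, a one-page (4096-byte) allocation maps to bin NBINS, a two-page allocation to NBINS + 1, and so on, which matches the nhbins = NBINS + (tcache_maxclass >> LG_PAGE) computation later in this diff.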
@@ -386,7 +386,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
             arena_chunk_t *chunk =
                 (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
             size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
-                PAGE_SHIFT);
+                LG_PAGE);
             chunk->map[pageind-map_bias].bits &=
                 ~CHUNK_MAP_CLASS_MASK;
         }
@@ -426,10 +426,10 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     arena = chunk->arena;
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     mapelm = &chunk->map[pageind-map_bias];
     run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
-        (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
+        (mapelm->bits >> LG_PAGE)) << LG_PAGE));
     bin = run->bin;
     binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) /
         sizeof(arena_bin_t);
@@ -462,7 +462,7 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
     assert(arena_salloc(ptr) > SMALL_MAXCLASS);
     assert(arena_salloc(ptr) <= tcache_maxclass);

-    binind = NBINS + (size >> PAGE_SHIFT) - 1;
+    binind = NBINS + (size >> LG_PAGE) - 1;

     if (config_fill && opt_junk)
         memset(ptr, 0x5a, size);
src/arena.c: 206 lines changed
@@ -176,10 +176,9 @@ static inline void
 arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
 {
     size_t i;
-    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind <<
-        PAGE_SHIFT));
+    UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

-    for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
+    for (i = 0; i < PAGE / sizeof(size_t); i++)
         assert(p[i] == 0);
 }

@@ -193,16 +192,15 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
     arena_avail_tree_t *runs_avail;

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
-        >> PAGE_SHIFT);
+    run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
     flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
     runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
         &arena->runs_avail_clean;
     total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
-        PAGE_SHIFT;
+        LG_PAGE;
     assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
         CHUNK_MAP_DIRTY) == flag_dirty);
-    need_pages = (size >> PAGE_SHIFT);
+    need_pages = (size >> LG_PAGE);
     assert(need_pages > 0);
     assert(need_pages <= total_pages);
     rem_pages = total_pages - need_pages;
@@ -214,8 +212,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
          * multiple.
          */
         size_t cactive_diff = CHUNK_CEILING((arena->nactive +
-            need_pages) << PAGE_SHIFT) - CHUNK_CEILING(arena->nactive <<
-            PAGE_SHIFT);
+            need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+            LG_PAGE);
         if (cactive_diff != 0)
             stats_cactive_add(cactive_diff);
     }
@@ -225,16 +223,16 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
     if (rem_pages > 0) {
         if (flag_dirty != 0) {
             chunk->map[run_ind+need_pages-map_bias].bits =
-                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
+                (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
             chunk->map[run_ind+total_pages-1-map_bias].bits =
-                (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
+                (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
         } else {
             chunk->map[run_ind+need_pages-map_bias].bits =
-                (rem_pages << PAGE_SHIFT) |
+                (rem_pages << LG_PAGE) |
                 (chunk->map[run_ind+need_pages-map_bias].bits &
                 CHUNK_MAP_UNZEROED);
             chunk->map[run_ind+total_pages-1-map_bias].bits =
-                (rem_pages << PAGE_SHIFT) |
+                (rem_pages << LG_PAGE) |
                 (chunk->map[run_ind+total_pages-1-map_bias].bits &
                 CHUNK_MAP_UNZEROED);
         }
@@ -264,8 +262,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
                     & CHUNK_MAP_UNZEROED) != 0) {
                     memset((void *)((uintptr_t)
                         chunk + ((run_ind+i) <<
-                        PAGE_SHIFT)), 0,
-                        PAGE_SIZE);
+                        LG_PAGE)), 0, PAGE);
                 } else if (config_debug) {
                     arena_chunk_validate_zeroed(
                         chunk, run_ind+i);
@@ -277,8 +274,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
              * zeroed.
              */
             memset((void *)((uintptr_t)chunk + (run_ind <<
-                PAGE_SHIFT)), 0, (need_pages <<
-                PAGE_SHIFT));
+                LG_PAGE)), 0, (need_pages << LG_PAGE));
         }
     }

@@ -310,7 +306,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
             == 0)
             arena_chunk_validate_zeroed(chunk, run_ind);
         for (i = 1; i < need_pages - 1; i++) {
-            chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
+            chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
                 | (chunk->map[run_ind+i-map_bias].bits &
                 CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
             if (config_debug && flag_dirty == 0 &&
@@ -319,7 +315,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
                 arena_chunk_validate_zeroed(chunk, run_ind+i);
         }
         chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
-            - 1) << PAGE_SHIFT) |
+            - 1) << LG_PAGE) |
             (chunk->map[run_ind+need_pages-1-map_bias].bits &
             CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
         if (config_debug && flag_dirty == 0 &&
@@ -460,7 +456,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
             + map_bias;

         run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
-            PAGE_SHIFT));
+            LG_PAGE));
         arena_run_split(arena, run, size, large, zero);
         return (run);
     }
@@ -472,7 +468,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
             + map_bias;

         run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
-            PAGE_SHIFT));
+            LG_PAGE));
         arena_run_split(arena, run, size, large, zero);
         return (run);
     }
@@ -482,8 +478,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
      */
     chunk = arena_chunk_alloc(arena);
     if (chunk != NULL) {
-        run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
-            PAGE_SHIFT));
+        run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
         arena_run_split(arena, run, size, large, zero);
         return (run);
     }
@@ -501,7 +496,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
             + map_bias;

         run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
-            PAGE_SHIFT));
+            LG_PAGE));
         arena_run_split(arena, run, size, large, zero);
         return (run);
     }
@@ -513,7 +508,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
             + map_bias;

         run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
-            PAGE_SHIFT));
+            LG_PAGE));
         arena_run_split(arena, run, size, large, zero);
         return (run);
     }
@@ -582,7 +577,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
         if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
             size_t npages;

-            npages = mapelm->bits >> PAGE_SHIFT;
+            npages = mapelm->bits >> LG_PAGE;
             assert(pageind + npages <= chunk_npages);
             if (mapelm->bits & CHUNK_MAP_DIRTY) {
                 size_t i;
@@ -590,7 +585,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
                 arena_avail_tree_remove(
                     &arena->runs_avail_dirty, mapelm);

-                mapelm->bits = (npages << PAGE_SHIFT) |
+                mapelm->bits = (npages << LG_PAGE) |
                     flag_unzeroed | CHUNK_MAP_LARGE |
                     CHUNK_MAP_ALLOCATED;
                 /*
@@ -615,9 +610,9 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
                      */
                     size_t cactive_diff =
                         CHUNK_CEILING((arena->nactive +
-                        npages) << PAGE_SHIFT) -
+                        npages) << LG_PAGE) -
                         CHUNK_CEILING(arena->nactive <<
-                        PAGE_SHIFT);
+                        LG_PAGE);
                     if (cactive_diff != 0)
                         stats_cactive_add(cactive_diff);
                 }
@@ -631,17 +626,17 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
         } else {
             /* Skip allocated run. */
             if (mapelm->bits & CHUNK_MAP_LARGE)
-                pageind += mapelm->bits >> PAGE_SHIFT;
+                pageind += mapelm->bits >> LG_PAGE;
             else {
                 arena_run_t *run = (arena_run_t *)((uintptr_t)
-                    chunk + (uintptr_t)(pageind << PAGE_SHIFT));
+                    chunk + (uintptr_t)(pageind << LG_PAGE));

-                assert((mapelm->bits >> PAGE_SHIFT) == 0);
+                assert((mapelm->bits >> LG_PAGE) == 0);
                 size_t binind = arena_bin_index(arena,
                     run->bin);
                 arena_bin_info_t *bin_info =
                     &arena_bin_info[binind];
-                pageind += bin_info->run_size >> PAGE_SHIFT;
+                pageind += bin_info->run_size >> LG_PAGE;
             }
         }
     }
@@ -662,7 +657,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
     ql_foreach(mapelm, &mapelms, u.ql_link) {
         size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
             sizeof(arena_chunk_map_t)) + map_bias;
-        size_t npages = mapelm->bits >> PAGE_SHIFT;
+        size_t npages = mapelm->bits >> LG_PAGE;

         assert(pageind + npages <= chunk_npages);
         assert(ndirty >= npages);
@@ -676,8 +671,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
 #else
 #  error "No method defined for purging unused dirty pages."
 #endif
-        madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
-            (npages << PAGE_SHIFT), MADV_PURGE);
+        madvise((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
+            (npages << LG_PAGE), MADV_PURGE);
 #undef MADV_PURGE
         if (config_stats)
             nmadvise++;
@@ -693,7 +688,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
         size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
             sizeof(arena_chunk_map_t)) + map_bias;
         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)(pageind << PAGE_SHIFT));
+            (uintptr_t)(pageind << LG_PAGE));

         ql_remove(&mapelms, mapelm, u.ql_link);
         arena_run_dalloc(arena, run, false);
@@ -804,33 +799,31 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
     arena_avail_tree_t *runs_avail;

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
-        >> PAGE_SHIFT);
+    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
     assert(run_ind >= map_bias);
     assert(run_ind < chunk_npages);
     if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
         size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
-        assert(size == PAGE_SIZE ||
-            (chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
+        assert(size == PAGE ||
+            (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
             ~PAGE_MASK) == 0);
-        assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
+        assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
             CHUNK_MAP_LARGE) != 0);
-        assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
+        assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
             CHUNK_MAP_ALLOCATED) != 0);
     } else {
         size_t binind = arena_bin_index(arena, run->bin);
         arena_bin_info_t *bin_info = &arena_bin_info[binind];
         size = bin_info->run_size;
     }
-    run_pages = (size >> PAGE_SHIFT);
+    run_pages = (size >> LG_PAGE);
     if (config_stats) {
         /*
          * Update stats_cactive if nactive is crossing a chunk
          * multiple.
          */
-        size_t cactive_diff = CHUNK_CEILING(arena->nactive <<
-            PAGE_SHIFT) - CHUNK_CEILING((arena->nactive - run_pages) <<
-            PAGE_SHIFT);
+        size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+            CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
         if (cactive_diff != 0)
             stats_cactive_sub(cactive_diff);
     }
@@ -869,7 +862,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
         CHUNK_MAP_DIRTY) == flag_dirty) {
         size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
             ~PAGE_MASK;
-        size_t nrun_pages = nrun_size >> PAGE_SHIFT;
+        size_t nrun_pages = nrun_size >> LG_PAGE;

         /*
          * Remove successor from runs_avail; the coalesced run is
@@ -900,7 +893,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
         CHUNK_MAP_DIRTY) == flag_dirty) {
         size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
             ~PAGE_MASK;
-        size_t prun_pages = prun_size >> PAGE_SHIFT;
+        size_t prun_pages = prun_size >> LG_PAGE;

         run_ind -= prun_pages;
@@ -970,8 +963,8 @@ static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
-    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
-    size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT;
+    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+    size_t head_npages = (oldsize - newsize) >> LG_PAGE;
     size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;

     assert(oldsize > newsize);
@@ -991,7 +984,7 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
         CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

     if (config_debug) {
-        UNUSED size_t tail_npages = newsize >> PAGE_SHIFT;
+        UNUSED size_t tail_npages = newsize >> LG_PAGE;
         assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
             .bits & ~PAGE_MASK) == 0);
         assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
@@ -1012,9 +1005,9 @@ static void
 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize, bool dirty)
 {
-    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
-    size_t head_npages = newsize >> PAGE_SHIFT;
-    size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT;
+    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+    size_t head_npages = newsize >> LG_PAGE;
+    size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
     size_t flag_dirty = chunk->map[pageind-map_bias].bits &
         CHUNK_MAP_DIRTY;
@@ -1064,8 +1057,8 @@ arena_bin_runs_first(arena_bin_t *bin)
         pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
             sizeof(arena_chunk_map_t))) + map_bias;
         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) <<
-            PAGE_SHIFT));
+            (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+            LG_PAGE));
         return (run);
     }

@@ -1076,7 +1069,7 @@ static void
 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
 {
     arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
-    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
+    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
     arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];

     assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
@@ -1088,7 +1081,7 @@ static void
 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
 {
     arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
-    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
+    size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
     arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];

     assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
@@ -1331,9 +1324,9 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
         arena->stats.nmalloc_large++;
         arena->stats.nrequests_large++;
         arena->stats.allocated_large += size;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
     }
     if (config_prof)
         arena_prof_accum(arena, size);
@@ -1401,9 +1394,9 @@ arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
         arena->stats.nmalloc_large++;
         arena->stats.nrequests_large++;
         arena->stats.allocated_large += size;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
     }
     malloc_mutex_unlock(&arena->lock);
@@ -1428,13 +1421,12 @@ arena_salloc(const void *ptr)
     assert(CHUNK_ADDR2BASE(ptr) != ptr);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     mapbits = chunk->map[pageind-map_bias].bits;
     assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapbits & CHUNK_MAP_LARGE) == 0) {
         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
-            PAGE_SHIFT));
+            (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
         size_t binind = arena_bin_index(chunk->arena, run->bin);
         arena_bin_info_t *bin_info = &arena_bin_info[binind];
         assert(((uintptr_t)ptr - ((uintptr_t)run +
@@ -1458,11 +1450,11 @@ arena_prof_promoted(const void *ptr, size_t size)

     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
-    assert(isalloc(ptr) == PAGE_SIZE);
+    assert(isalloc(ptr) == PAGE);
     assert(size <= SMALL_MAXCLASS);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     binind = SMALL_SIZE2BIN(size);
     assert(binind < NBINS);
     chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
@@ -1480,13 +1472,12 @@ arena_salloc_demote(const void *ptr)
     assert(CHUNK_ADDR2BASE(ptr) != ptr);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     mapbits = chunk->map[pageind-map_bias].bits;
     assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapbits & CHUNK_MAP_LARGE) == 0) {
         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
-            PAGE_SHIFT));
+            (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE));
         size_t binind = arena_bin_index(chunk->arena, run->bin);
         arena_bin_info_t *bin_info = &arena_bin_info[binind];
         assert(((uintptr_t)ptr - ((uintptr_t)run +
@@ -1496,7 +1487,7 @@ arena_salloc_demote(const void *ptr)
     } else {
         assert(((uintptr_t)ptr & PAGE_MASK) == 0);
         ret = mapbits & ~PAGE_MASK;
-        if (prof_promote && ret == PAGE_SIZE && (mapbits &
+        if (prof_promote && ret == PAGE && (mapbits &
             CHUNK_MAP_CLASS_MASK) != 0) {
             size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
                 CHUNK_MAP_CLASS_SHIFT) - 1;
@@ -1542,18 +1533,18 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,

     assert(run != bin->runcur);
     assert(arena_run_tree_search(&bin->runs, &chunk->map[
-        (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL);
+        (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);

     binind = arena_bin_index(chunk->arena, run->bin);
     bin_info = &arena_bin_info[binind];

     malloc_mutex_unlock(&bin->lock);
     /******************************/
-    npages = bin_info->run_size >> PAGE_SHIFT;
-    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT);
+    npages = bin_info->run_size >> LG_PAGE;
+    run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
     past = (size_t)(PAGE_CEILING((uintptr_t)run +
         (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
-        bin_info->reg_size) - (uintptr_t)chunk) >> PAGE_SHIFT);
+        bin_info->reg_size) - (uintptr_t)chunk) >> LG_PAGE);
     malloc_mutex_lock(&arena->lock);

     /*
@@ -1573,8 +1564,8 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
         chunk->map[run_ind-map_bias].bits = bin_info->run_size |
             CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
             CHUNK_MAP_FLAGS_MASK);
-        arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT),
-            ((past - run_ind) << PAGE_SHIFT), false);
+        arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
+            ((past - run_ind) << LG_PAGE), false);
         /* npages = past - run_ind; */
     }
     arena_run_dalloc(arena, run, true);
@@ -1615,9 +1606,9 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     arena_bin_t *bin;
     size_t size;

-    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
-        (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
+        (mapelm->bits >> LG_PAGE)) << LG_PAGE));
     bin = run->bin;
     size_t binind = arena_bin_index(arena, bin);
     arena_bin_info_t *bin_info = &arena_bin_info[binind];
@@ -1692,8 +1683,7 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
 {

     if (config_fill || config_stats) {
-        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
-            PAGE_SHIFT;
+        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
         size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;

         if (config_fill && config_stats && opt_junk)
@@ -1701,8 +1691,8 @@ arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
         if (config_stats) {
             arena->stats.ndalloc_large++;
             arena->stats.allocated_large -= size;
-            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
-            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
+            arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
+            arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
         }
     }

@@ -1726,15 +1716,15 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     if (config_stats) {
         arena->stats.ndalloc_large++;
         arena->stats.allocated_large -= oldsize;
-        arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
-        arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
+        arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
+        arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

         arena->stats.nmalloc_large++;
         arena->stats.nrequests_large++;
         arena->stats.allocated_large += size;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
-        arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+        arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
     }
     malloc_mutex_unlock(&arena->lock);
 }
@@ -1743,8 +1733,8 @@ static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t oldsize, size_t size, size_t extra, bool zero)
 {
-    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
-    size_t npages = oldsize >> PAGE_SHIFT;
+    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+    size_t npages = oldsize >> LG_PAGE;
     size_t followsize;

     assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
@@ -1766,10 +1756,10 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
         size_t splitsize = (oldsize + followsize <= size + extra)
             ? followsize : size + extra - oldsize;
         arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
-            ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);
+            ((pageind+npages) << LG_PAGE)), splitsize, true, zero);

         size = oldsize + splitsize;
-        npages = size >> PAGE_SHIFT;
+        npages = size >> LG_PAGE;

         /*
          * Mark the extended run as dirty if either portion of the run
@@ -1791,18 +1781,18 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
         if (config_stats) {
             arena->stats.ndalloc_large++;
             arena->stats.allocated_large -= oldsize;
-            arena->stats.lstats[(oldsize >> PAGE_SHIFT)
+            arena->stats.lstats[(oldsize >> LG_PAGE)
                 - 1].ndalloc++;
-            arena->stats.lstats[(oldsize >> PAGE_SHIFT)
+            arena->stats.lstats[(oldsize >> LG_PAGE)
                 - 1].curruns--;

             arena->stats.nmalloc_large++;
             arena->stats.nrequests_large++;
             arena->stats.allocated_large += size;
-            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
-            arena->stats.lstats[(size >> PAGE_SHIFT)
+            arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+            arena->stats.lstats[(size >> LG_PAGE)
                 - 1].nrequests++;
-            arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
+            arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
         }
         malloc_mutex_unlock(&arena->lock);
         return (false);
@@ -2023,7 +2013,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
     uint32_t try_ctx0_offset, good_ctx0_offset;
     uint32_t try_reg0_offset, good_reg0_offset;

-    assert(min_run_size >= PAGE_SIZE);
+    assert(min_run_size >= PAGE);
     assert(min_run_size <= arena_maxclass);

     /*
@@ -2076,7 +2066,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
         good_reg0_offset = try_reg0_offset;

         /* Try more aggressive settings. */
-        try_run_size += PAGE_SIZE;
+        try_run_size += PAGE;
         try_nregs = ((try_run_size - sizeof(arena_run_t)) /
             bin_info->reg_size)
             + 1; /* Counter-act try_nregs-- in loop. */
@@ -2127,7 +2117,7 @@ static void
 bin_info_init(void)
 {
     arena_bin_info_t *bin_info;
-    size_t prev_run_size = PAGE_SIZE;
+    size_t prev_run_size = PAGE;

 #define SIZE_CLASS(bin, delta, size) \
     bin_info = &arena_bin_info[bin]; \
@@ -2158,14 +2148,14 @@ arena_boot(void)
      */
     map_bias = 0;
     for (i = 0; i < 3; i++) {
-        header_size = offsetof(arena_chunk_t, map)
-            + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
-        map_bias = (header_size >> PAGE_SHIFT) + ((header_size &
-            PAGE_MASK) != 0);
+        header_size = offsetof(arena_chunk_t, map) +
+            (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
+        map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
+            != 0);
     }
     assert(map_bias > 0);

-    arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);
+    arena_maxclass = chunksize - (map_bias << LG_PAGE);

     bin_info_init();
 }
@@ -105,9 +105,9 @@ chunk_boot0(void)

     /* Set variables according to the value of opt_lg_chunk. */
     chunksize = (ZU(1) << opt_lg_chunk);
-    assert(chunksize >= PAGE_SIZE);
+    assert(chunksize >= PAGE);
     chunksize_mask = chunksize - 1;
-    chunk_npages = (chunksize >> PAGE_SHIFT);
+    chunk_npages = (chunksize >> LG_PAGE);

     if (config_stats || config_prof) {
         if (malloc_mutex_init(&chunks_mtx))
src/ctl.c: 13 lines changed
@@ -83,8 +83,7 @@ INDEX_PROTO(arenas_lrun_i)
 CTL_PROTO(arenas_narenas)
 CTL_PROTO(arenas_initialized)
 CTL_PROTO(arenas_quantum)
-CTL_PROTO(arenas_pagesize)
-CTL_PROTO(arenas_chunksize)
+CTL_PROTO(arenas_page)
 CTL_PROTO(arenas_tcache_max)
 CTL_PROTO(arenas_nbins)
 CTL_PROTO(arenas_nhbins)
@@ -227,8 +226,7 @@ static const ctl_node_t arenas_node[] = {
     {NAME("narenas"), CTL(arenas_narenas)},
     {NAME("initialized"), CTL(arenas_initialized)},
     {NAME("quantum"), CTL(arenas_quantum)},
-    {NAME("pagesize"), CTL(arenas_pagesize)},
-    {NAME("chunksize"), CTL(arenas_chunksize)},
+    {NAME("page"), CTL(arenas_page)},
     {NAME("tcache_max"), CTL(arenas_tcache_max)},
     {NAME("nbins"), CTL(arenas_nbins)},
     {NAME("nhbins"), CTL(arenas_nhbins)},
@@ -520,7 +518,7 @@ ctl_refresh(void)
         + ctl_stats.arenas[narenas].astats.allocated_large
         + ctl_stats.huge.allocated;
     ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
-        PAGE_SHIFT) + ctl_stats.huge.allocated;
+        LG_PAGE) + ctl_stats.huge.allocated;
     ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
 }

@@ -1116,7 +1114,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
     return (super_arenas_bin_i_node);
 }

-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
 const ctl_node_t *
 arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
 {
@@ -1155,8 +1153,7 @@ RETURN:
 }

 CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
-CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
-CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
+CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
 CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
 CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
 CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
@@ -25,12 +25,6 @@ bool opt_xmalloc = false;
 bool opt_zero = false;
 size_t opt_narenas = 0;

-#ifdef DYNAMIC_PAGE_SHIFT
-size_t pagesize;
-size_t pagesize_mask;
-size_t lg_pagesize;
-#endif
-
 unsigned ncpus;

 malloc_mutex_t arenas_lock;
@@ -477,7 +471,7 @@ malloc_conf_init(void)
              * Chunks always require at least one header page,
              * plus one data page.
              */
-            CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, PAGE_SHIFT+1,
+            CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE+1,
                 (sizeof(size_t) << 3) - 1)
             CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX)
             CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult,
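The LG_PAGE+1 lower bound is the code expression of the comment above: a chunk must hold at least one header page plus one data page, so with 4 KiB pages the smallest accepted value is lg_chunk:13 (8 KiB chunks), e.g. MALLOC_CONF="lg_chunk:13"; the built-in default is considerably larger.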
@@ -550,25 +544,6 @@ malloc_init_hard(void)
 #endif
     malloc_initializer = INITIALIZER;

-#ifdef DYNAMIC_PAGE_SHIFT
-    /* Get page size. */
-    {
-        long result;
-
-        result = sysconf(_SC_PAGESIZE);
-        assert(result != -1);
-        pagesize = (size_t)result;
-
-        /*
-         * We assume that pagesize is a power of 2 when calculating
-         * pagesize_mask and lg_pagesize.
-         */
-        assert(((result - 1) & result) == 0);
-        pagesize_mask = result - 1;
-        lg_pagesize = ffs((int)result) - 1;
-    }
-#endif
-
     malloc_tsd_boot();
     if (config_prof)
         prof_boot0();
@@ -1145,7 +1120,7 @@ void *
 je_valloc(size_t size)
 {
     void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
-    imemalign(&ret, PAGE_SIZE, size, 1);
+    imemalign(&ret, PAGE, size, 1);
     return (ret);
 }
 #endif
@@ -1386,7 +1361,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
             alignment, zero, no_move);
         if (q == NULL)
             goto ERR;
-        if (max_usize < PAGE_SIZE) {
+        if (max_usize < PAGE) {
             usize = max_usize;
             arena_prof_promoted(q, usize);
         } else
@@ -1179,7 +1179,7 @@ prof_boot1(void)
         prof_interval = 0;
     }

-    prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT);
+    prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
 }

 bool
@@ -412,6 +412,9 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
         CTL_GET("arenas.quantum", &sv, size_t);
         malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);

+        CTL_GET("arenas.page", &sv, size_t);
+        malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+
         CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
         if (ssv >= 0) {
             malloc_cprintf(write_cb, cbopaque,
src/tcache.c: 10 lines changed
@@ -72,7 +72,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
             chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
             if (chunk->arena == arena) {
                 size_t pageind = ((uintptr_t)ptr -
-                    (uintptr_t)chunk) >> PAGE_SHIFT;
+                    (uintptr_t)chunk) >> LG_PAGE;
                 arena_chunk_map_t *mapelm =
                     &chunk->map[pageind-map_bias];
                 arena_dalloc_bin(arena, chunk, ptr, mapelm);
@@ -303,11 +303,11 @@ tcache_destroy(tcache_t *tcache)
         arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
         arena_t *arena = chunk->arena;
         size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
-            PAGE_SHIFT;
+            LG_PAGE;
         arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
         arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
-            (uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) <<
-            PAGE_SHIFT));
+            (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
+            LG_PAGE));
         arena_bin_t *bin = run->bin;

         malloc_mutex_lock(&bin->lock);
@@ -398,7 +398,7 @@ tcache_boot0(void)
     else
         tcache_maxclass = (1U << opt_lg_tcache_max);

-    nhbins = NBINS + (tcache_maxclass >> PAGE_SHIFT);
+    nhbins = NBINS + (tcache_maxclass >> LG_PAGE);

     /* Initialize tcache_bin_info. */
     tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
@@ -78,7 +78,7 @@ zone_valloc(malloc_zone_t *zone, size_t size)
 {
     void *ret = NULL; /* Assignment avoids useless compiler warning. */

-    je_posix_memalign(&ret, PAGE_SIZE, size);
+    je_posix_memalign(&ret, PAGE, size);

     return (ret);
 }