Refactor chunk map.

Break the chunk map into two separate arrays, in order to improve cache
locality. This is related to issue #23.
Author:    Qinfan Wu
Date:      2014-08-29 13:34:40 -07:00
Committer: Jason Evans
Parent:    f34f6037e8
Commit:    ff6a31d3b9

7 changed files with 186 additions and 149 deletions
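For orientation before the diff: the old arena_chunk_map_t interleaved the hot allocation-bits word with the red-black-tree and dirty-list linkage that is only touched while a run is free or dirty. The patch splits these into two parallel per-page arrays (arena_chunk_map_bits_t and arena_chunk_map_misc_t) inside the chunk header, and a new global map_misc_offset lets a misc element's address be mapped back to its page index. The following is a minimal, self-contained sketch of that arithmetic, not the jemalloc definitions; the struct layouts, the 512-page chunk geometry, and the omission of the fixed chunk-header fields are illustrative assumptions.

/*
 * Conceptual sketch only -- not the jemalloc definitions.  Two parallel
 * per-page arrays replace the single "fat" map element; the fixed chunk
 * header fields are ignored and a 512-page chunk is assumed.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct { size_t bits; } map_bits_t;               /* hot word */
typedef struct { void *rb[2]; void *dr[2]; } map_misc_t;  /* cold linkage */

#define LG_PAGE      12
#define PAGE         ((size_t)1 << LG_PAGE)
#define PAGE_MASK    (PAGE - 1)
#define CHUNK_NPAGES ((size_t)512)   /* 2 MiB chunk / 4 KiB pages */

int
main(void)
{
	size_t map_bias = 0, header_size, i;

	/*
	 * Iterate as arena_boot() does: the header must hold both arrays
	 * for every page that is not itself consumed by the header.
	 */
	for (i = 0; i < 3; i++) {
		header_size = (sizeof(map_bits_t) + sizeof(map_misc_t)) *
		    (CHUNK_NPAGES - map_bias);
		map_bias = (header_size >> LG_PAGE) +
		    ((header_size & PAGE_MASK) != 0);
	}

	/* The misc array starts immediately after the bits array. */
	size_t map_misc_offset = sizeof(map_bits_t) *
	    (CHUNK_NPAGES - map_bias);

	/*
	 * Recover a page index from a misc element's chunk-relative offset,
	 * mirroring what arena_miscelm_to_pageind() does in the diff.
	 */
	size_t elm_offset = map_misc_offset + 7 * sizeof(map_misc_t);
	size_t pageind = (elm_offset - map_misc_offset) / sizeof(map_misc_t) +
	    map_bias;

	printf("map_bias=%zu map_misc_offset=%zu pageind=%zu\n",
	    map_bias, map_misc_offset, pageind);
	return (0);
}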

@@ -61,55 +61,57 @@ static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
/******************************************************************************/
JEMALLOC_INLINE_C size_t
-arena_mapelm_to_pageind(arena_chunk_map_t *mapelm)
+arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
-uintptr_t map_offset =
-CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map);
+size_t offset = CHUNK_ADDR2OFFSET(miscelm);
-return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias);
+return ((offset - map_misc_offset) / sizeof(arena_chunk_map_misc_t) +
+map_bias);
}
JEMALLOC_INLINE_C size_t
-arena_mapelm_to_bits(arena_chunk_map_t *mapelm)
+arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
{
+arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
+size_t pageind = arena_miscelm_to_pageind(miscelm);
-return (mapelm->bits);
+return arena_mapbits_get(chunk, pageind);
}
static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
-uintptr_t a_mapelm = (uintptr_t)a;
-uintptr_t b_mapelm = (uintptr_t)b;
+uintptr_t a_miscelm = (uintptr_t)a;
+uintptr_t b_miscelm = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
-return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
+return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
rb_link, arena_run_comp)
static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
+arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
int ret;
size_t a_size;
-size_t b_size = arena_mapelm_to_bits(b) & ~PAGE_MASK;
-uintptr_t a_mapelm = (uintptr_t)a;
-uintptr_t b_mapelm = (uintptr_t)b;
+size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
+uintptr_t a_miscelm = (uintptr_t)a;
+uintptr_t b_miscelm = (uintptr_t)b;
-if (a_mapelm & CHUNK_MAP_KEY)
-a_size = a_mapelm & ~PAGE_MASK;
-else
-a_size = arena_mapelm_to_bits(a) & ~PAGE_MASK;
+if (a_miscelm & CHUNK_MAP_KEY)
+a_size = a_miscelm & ~PAGE_MASK;
+else
+a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
-if (!(a_mapelm & CHUNK_MAP_KEY))
-ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
+if (!(a_miscelm & CHUNK_MAP_KEY))
+ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
else {
/*
* Treat keys as if they are lower than anything else.
@@ -122,8 +124,8 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
}
/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
-rb_link, arena_avail_comp)
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
+arena_chunk_map_misc_t, rb_link, arena_avail_comp)
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
@@ -132,7 +134,7 @@ arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
-arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
+arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
}
@@ -143,7 +145,7 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
-arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
+arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
}
@@ -151,14 +153,14 @@ static void
arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
-arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
CHUNK_MAP_DIRTY);
-ql_elm_new(mapelm, dr_link);
-ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
+ql_elm_new(miscelm, dr_link);
+ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
arena->ndirty += npages;
}
@@ -166,13 +168,13 @@ static void
arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
-arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
CHUNK_MAP_DIRTY);
-ql_remove(&arena->runs_dirty, mapelm, dr_link);
+ql_remove(&arena->runs_dirty, miscelm, dr_link);
arena->ndirty -= npages;
}
@@ -532,16 +534,17 @@ arena_chunk_init_hard(arena_t *arena)
*/
if (zero == false) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
-(void *)arena_mapp_get(chunk, map_bias+1),
-(size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) -
-(uintptr_t)arena_mapp_get(chunk, map_bias+1)));
+(void *)arena_bitselm_get(chunk, map_bias+1),
+(size_t)((uintptr_t) arena_bitselm_get(chunk,
+chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
+map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else {
-JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
-map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
-chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
-map_bias+1)));
+JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+*)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
+arena_bitselm_get(chunk, chunk_npages-1) -
+(uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -641,14 +644,14 @@ static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run;
-arena_chunk_map_t *mapelm;
-arena_chunk_map_t *key;
+arena_chunk_map_misc_t *miscelm;
+arena_chunk_map_misc_t *key;
-key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
-mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
-if (mapelm != NULL) {
-arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
-size_t pageind = arena_mapelm_to_pageind(mapelm);
+key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
+miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
+if (miscelm != NULL) {
+arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(miscelm);
+size_t pageind = arena_miscelm_to_pageind(miscelm);
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
@@ -695,14 +698,14 @@ static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
arena_run_t *run;
-arena_chunk_map_t *mapelm;
-arena_chunk_map_t *key;
+arena_chunk_map_misc_t *miscelm;
+arena_chunk_map_misc_t *key;
-key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
-mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
-if (mapelm != NULL) {
-arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
-size_t pageind = arena_mapelm_to_pageind(mapelm);
+key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
+miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
+if (miscelm != NULL) {
+arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(miscelm);
+size_t pageind = arena_miscelm_to_pageind(miscelm);
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
@@ -769,13 +772,13 @@ static size_t
arena_dirty_count(arena_t *arena)
{
size_t ndirty = 0;
-arena_chunk_map_t *mapelm;
+arena_chunk_map_misc_t *miscelm;
arena_chunk_t *chunk;
size_t pageind, npages;
-ql_foreach(mapelm, &arena->runs_dirty, dr_link) {
-chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
-pageind = arena_mapelm_to_pageind(mapelm);
+ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
+chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+pageind = arena_miscelm_to_pageind(miscelm);
assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
@@ -808,16 +811,17 @@ arena_compute_npurge(arena_t *arena, bool all)
static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
-arena_chunk_mapelms_t *mapelms)
+arena_chunk_miscelms_t *miscelms)
{
-arena_chunk_map_t *mapelm;
+arena_chunk_map_misc_t *miscelm;
size_t nstashed = 0;
/* Add at least npurge pages to purge_list. */
-for (mapelm = ql_first(&arena->runs_dirty); mapelm != NULL;
-mapelm = ql_first(&arena->runs_dirty)) {
-arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
-size_t pageind = arena_mapelm_to_pageind(mapelm);
+for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
+miscelm = ql_first(&arena->runs_dirty)) {
+arena_chunk_t *chunk =
+(arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+size_t pageind = arena_miscelm_to_pageind(miscelm);
size_t run_size = arena_mapbits_unallocated_size_get(chunk,
pageind);
size_t npages = run_size >> LG_PAGE;
@@ -838,8 +842,8 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
/* Append to purge_list for later processing. */
-ql_elm_new(mapelm, dr_link);
-ql_tail_insert(mapelms, mapelm, dr_link);
+ql_elm_new(miscelm, dr_link);
+ql_tail_insert(miscelms, miscelm, dr_link);
nstashed += npages;
@@ -851,10 +855,10 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
}
static size_t
-arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
+arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
size_t npurged, nmadvise;
-arena_chunk_map_t *mapelm;
+arena_chunk_map_misc_t *miscelm;
if (config_stats)
nmadvise = 0;
@@ -862,13 +866,13 @@ arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
malloc_mutex_unlock(&arena->lock);
-ql_foreach(mapelm, mapelms, dr_link) {
+ql_foreach(miscelm, miscelms, dr_link) {
arena_chunk_t *chunk;
size_t pageind, run_size, npages, flag_unzeroed, i;
bool unzeroed;
-chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
-pageind = arena_mapelm_to_pageind(mapelm);
+chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+pageind = arena_miscelm_to_pageind(miscelm);
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
@@ -908,18 +912,19 @@ arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
}
static void
-arena_unstash_purged(arena_t *arena, arena_chunk_mapelms_t *mapelms)
+arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
-arena_chunk_map_t *mapelm;
+arena_chunk_map_misc_t *miscelm;
/* Deallocate runs. */
-for (mapelm = ql_first(mapelms); mapelm != NULL;
-mapelm = ql_first(mapelms)) {
-arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
-size_t pageind = arena_mapelm_to_pageind(mapelm);
+for (miscelm = ql_first(miscelms); miscelm != NULL;
+miscelm = ql_first(miscelms)) {
+arena_chunk_t *chunk =
+(arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+size_t pageind = arena_miscelm_to_pageind(miscelm);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)(pageind << LG_PAGE));
-ql_remove(mapelms, mapelm, dr_link);
+ql_remove(miscelms, miscelm, dr_link);
arena_run_dalloc(arena, run, false, true);
}
}
@@ -928,7 +933,7 @@ void
arena_purge(arena_t *arena, bool all)
{
size_t npurge, npurgeable, npurged;
-arena_chunk_mapelms_t purge_list;
+arena_chunk_miscelms_t purge_list;
if (config_debug) {
size_t ndirty = arena_dirty_count(arena);
@@ -1180,14 +1185,14 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
-arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
-if (mapelm != NULL) {
+arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
+if (miscelm != NULL) {
arena_chunk_t *chunk;
size_t pageind;
arena_run_t *run;
-chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
-pageind = arena_mapelm_to_pageind(mapelm);
+chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+pageind = arena_miscelm_to_pageind(miscelm);
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) <<
LG_PAGE));
@@ -1202,11 +1207,11 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
-arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
-assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
-arena_run_tree_insert(&bin->runs, mapelm);
+arena_run_tree_insert(&bin->runs, miscelm);
}
static void
@@ -1214,11 +1219,11 @@ arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
-arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
-assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
-arena_run_tree_remove(&bin->runs, mapelm);
+arena_run_tree_remove(&bin->runs, miscelm);
}
static arena_run_t *
@@ -1684,9 +1689,8 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t npages, run_ind, past;
assert(run != bin->runcur);
-assert(arena_run_tree_search(&bin->runs,
-arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
-== NULL);
+assert(arena_run_tree_search(&bin->runs, arena_miscelm_get(chunk,
+((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) == NULL);
binind = arena_bin_index(chunk->arena, run->bin);
bin_info = &arena_bin_info[binind];
@@ -1749,7 +1753,7 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-arena_chunk_map_t *mapelm)
+arena_chunk_map_bits_t *bitselm)
{
size_t pageind;
arena_run_t *run;
@@ -1761,7 +1765,8 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
-binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind));
+binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+pageind));
bin_info = &arena_bin_info[binind];
if (config_fill || config_stats)
size = bin_info->reg_size;
@@ -1784,7 +1789,7 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-size_t pageind, arena_chunk_map_t *mapelm)
+size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
arena_bin_t *bin;
@@ -1793,7 +1798,7 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
malloc_mutex_lock(&bin->lock);
-arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
+arena_dalloc_bin_locked(arena, chunk, ptr, bitselm);
malloc_mutex_unlock(&bin->lock);
}
@@ -1801,15 +1806,15 @@ void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
-arena_chunk_map_t *mapelm;
+arena_chunk_map_bits_t *bitselm;
if (config_debug) {
/* arena_ptr_small_binind_get() does extra sanity checking. */
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
-mapelm = arena_mapp_get(chunk, pageind);
-arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+bitselm = arena_bitselm_get(chunk, pageind);
+arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET
@@ -2268,7 +2273,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* be twice as large in order to maintain alignment.
*/
if (config_fill && opt_redzone) {
-size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1);
+size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
+1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
@@ -2404,13 +2410,17 @@ arena_boot(void)
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
-header_size = offsetof(arena_chunk_t, map) +
-(sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
+header_size = offsetof(arena_chunk_t, map_bits) +
+((sizeof(arena_chunk_map_bits_t) +
+sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
!= 0);
}
assert(map_bias > 0);
+map_misc_offset = offsetof(arena_chunk_t, map_bits) +
+sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
arena_maxclass = chunksize - (map_bias << LG_PAGE);
bin_info_init();

@@ -28,6 +28,7 @@ size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
+size_t map_misc_offset;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/

@@ -118,10 +118,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
-arena_chunk_map_t *mapelm =
-arena_mapp_get(chunk, pageind);
+arena_chunk_map_bits_t *bitselm =
+arena_bitselm_get(chunk, pageind);
arena_dalloc_bin_locked(arena, chunk, ptr,
-mapelm);
+bitselm);
} else {
/*
* This object was allocated via a different
@@ -393,9 +393,10 @@ tcache_destroy(tcache_t *tcache)
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
LG_PAGE;
-arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+arena_chunk_map_bits_t *bitselm = arena_bitselm_get(chunk,
+pageind);
-arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
+arena_dalloc_bin(arena, chunk, tcache, pageind, bitselm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;