Refactor chunk map.

Break the chunk map into two separate arrays, in order to improve cache
locality. This is related to issue #23.
Qinfan Wu, 2014-08-29 13:34:40 -07:00; committed by Jason Evans
parent f34f6037e8
commit ff6a31d3b9
7 changed files with 186 additions and 149 deletions
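
Note: the change is a classic array-of-structs to struct-of-arrays split. A minimal sketch of the idea behind the diff below (simplified, illustrative types, not the exact jemalloc declarations):

/* Before: one map element per page carries both the hot "bits" word
 * and the cold tree/list linkage, so scanning bits drags the linkage
 * fields through the cache. */
struct combined_elm_s {
	void	*rb_link[3];	/* run-tree linkage (cold) */
	void	*prof_tctx;	/* profiling counter (cold) */
	void	*dr_link[2];	/* dirty-run list linkage (cold) */
	size_t	 bits;		/* run size/flags (hot) */
};

/* After: two parallel arrays indexed by the same page index.  The hot
 * bits now pack one size_t per page, so several times as many map
 * entries fit in each cache line. */
struct bits_elm_s { size_t bits; };
struct misc_elm_s {
	void	*rb_link[3];
	void	*prof_tctx;
	void	*dr_link[2];
};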


@@ -43,7 +43,8 @@
*/
#define LG_DIRTY_MULT_DEFAULT 3
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
typedef struct arena_chunk_s arena_chunk_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_bin_info_s arena_bin_info_t;
@@ -55,34 +56,7 @@ typedef struct arena_s arena_t;
#ifdef JEMALLOC_H_STRUCTS
/* Each element of the chunk map corresponds to one page within the chunk. */
struct arena_chunk_map_s {
#ifndef JEMALLOC_PROF
/*
* Overlay prof_tctx in order to allow it to be referenced by dead code.
* Such antics aren't warranted for per arena data structures, but
* chunk map overhead accounts for a percentage of memory, rather than
* being just a fixed cost.
*/
union {
#endif
/*
* Linkage for run trees. There are two disjoint uses:
*
* 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
rb_node(arena_chunk_map_t) rb_link;
/* Profile counters, used for large object runs. */
prof_tctx_t *prof_tctx;
#ifndef JEMALLOC_PROF
}; /* union { ... }; */
#endif
/* Linkage for list of dirty runs. */
ql_elm(arena_chunk_map_t) dr_link;
struct arena_chunk_map_bits_s {
/*
* Run address (or size) and various flags are stored together. The bit
* layout looks like (assuming 32-bit system):
@@ -149,9 +123,43 @@ struct arena_chunk_map_s {
#define CHUNK_MAP_ALLOCATED ((size_t)0x1U)
#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
/*
* Each arena_chunk_map_misc_t corresponds to one page within the chunk, just
* like arena_chunk_map_bits_t. Two separate arrays are stored within each
* chunk header in order to improve cache locality.
*/
struct arena_chunk_map_misc_s {
#ifndef JEMALLOC_PROF
/*
* Overlay prof_tctx in order to allow it to be referenced by dead code.
* Such antics aren't warranted for per arena data structures, but
* chunk map overhead accounts for a percentage of memory, rather than
* being just a fixed cost.
*/
union {
#endif
/*
* Linkage for run trees. There are two disjoint uses:
*
* 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
rb_node(arena_chunk_map_misc_t) rb_link;
/* Profile counters, used for large object runs. */
prof_tctx_t *prof_tctx;
#ifndef JEMALLOC_PROF
}; /* union { ... }; */
#endif
/* Linkage for list of dirty runs. */
ql_elm(arena_chunk_map_misc_t) dr_link;
};
typedef rb_tree(arena_chunk_map_misc_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
typedef ql_head(arena_chunk_map_misc_t) arena_chunk_miscelms_t;
/* Arena chunk header. */
struct arena_chunk_s {
@@ -164,7 +172,7 @@ struct arena_chunk_s {
* need to be tracked in the map. This omission saves a header page
* for common chunk sizes (e.g. 4 MiB).
*/
arena_chunk_map_t map[1]; /* Dynamically sized. */
arena_chunk_map_bits_t map_bits[1]; /* Dynamically sized. */
};
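
Note: map_bits[1] keeps the pre-C99 trailing-array idiom: the chunk header is over-allocated so that the array really holds chunk_npages - map_bias entries, with the misc array laid out after it at map_misc_offset. A self-contained sketch of the idiom (hypothetical hdr_s type, not jemalloc code):

#include <stddef.h>
#include <stdlib.h>

struct hdr_s {
	size_t	nelms;
	size_t	elms[1];	/* dynamically sized trailing array */
};

static struct hdr_s *
hdr_alloc(size_t nelms)
{
	/* One element is already counted inside offsetof(..., elms[1]). */
	struct hdr_s *hdr = malloc(offsetof(struct hdr_s, elms) +
	    nelms * sizeof(size_t));

	if (hdr != NULL)
		hdr->nelms = nelms;
	return (hdr);
}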
struct arena_run_s {
@@ -335,7 +343,7 @@ struct arena_s {
arena_avail_tree_t runs_avail;
/* List of dirty runs this arena manages. */
arena_chunk_mapelms_t runs_dirty;
arena_chunk_miscelms_t runs_dirty;
/*
User-configurable chunk allocation and deallocation functions.
@@ -393,9 +401,9 @@ void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm);
arena_chunk_map_bits_t *bitselm);
void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
size_t pageind, arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
#ifdef JEMALLOC_JET
@@ -439,7 +447,10 @@ size_t small_bin2size(size_t binind);
size_t small_s2u_compute(size_t size);
size_t small_s2u_lookup(size_t size);
size_t small_s2u(size_t size);
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
@@ -623,21 +634,32 @@ small_s2u(size_t size)
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
return (&chunk->map[pageind-map_bias]);
return (&chunk->map_bits[pageind-map_bias]);
}
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
assert(pageind < chunk_npages);
return ((arena_chunk_map_misc_t *)((uintptr_t)chunk +
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
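
Note: with both getters in place, a lookup touches the two arrays independently; schematically (a fragment in terms of this diff's functions, not a standalone program):

/* Resolve a pointer to its per-page metadata. */
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);	/* hot array */
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk,
    pageind);						/* cold array */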
JEMALLOC_ALWAYS_INLINE size_t *
arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
{
return (&arena_mapp_get(chunk, pageind)->bits);
return (&arena_bitselm_get(chunk, pageind)->bits);
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -1005,7 +1027,7 @@ arena_prof_tctx_get(const void *ptr)
if ((mapbits & CHUNK_MAP_LARGE) == 0)
ret = (prof_tctx_t *)(uintptr_t)1U;
else
ret = arena_mapp_get(chunk, pageind)->prof_tctx;
ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
return (ret);
}
@@ -1025,7 +1047,7 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (arena_mapbits_large_get(chunk, pageind) != 0)
arena_mapp_get(chunk, pageind)->prof_tctx = tctx;
arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
}
JEMALLOC_ALWAYS_INLINE void *


@@ -41,6 +41,7 @@ extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxclass; /* Max size class for arenas. */
void *chunk_alloc_base(size_t size);


@@ -4,6 +4,7 @@ a0malloc
arena_alloc_junk_small
arena_bin_index
arena_bin_info
arena_bitselm_get
arena_boot
arena_chunk_alloc_huge
arena_chunk_dalloc_huge
@@ -38,8 +39,8 @@ arena_mapbits_unzeroed_set
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapp_get
arena_maxclass
arena_miscelm_get
arena_new
arena_palloc
arena_postfork_child
@@ -254,6 +255,7 @@ malloc_vcprintf
malloc_vsnprintf
malloc_write
map_bias
map_misc_offset
mb_write
mutex_boot
narenas_auto


@@ -240,7 +240,7 @@ cat <<EOF
* The small_size2bin lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_t's bits field.
* stored in 8 bits of arena_chunk_map_bits_t's bits field.
*/
#if (NBINS > 255)
# error "Too many small size classes"


@@ -61,55 +61,57 @@ static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
/******************************************************************************/
JEMALLOC_INLINE_C size_t
arena_mapelm_to_pageind(arena_chunk_map_t *mapelm)
arena_miscelm_to_pageind(arena_chunk_map_misc_t *miscelm)
{
uintptr_t map_offset =
CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map);
size_t offset = CHUNK_ADDR2OFFSET(miscelm);
return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias);
return ((offset - map_misc_offset) / sizeof(arena_chunk_map_misc_t) +
map_bias);
}
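
Note: arena_miscelm_to_pageind recovers the page index from the element address alone, relying on chunks being chunksize-aligned so that masking yields the offset within the chunk. A self-contained sketch of the same arithmetic (the chunk size, map_bias, and map_misc_offset values are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define CHUNKSIZE	((uintptr_t)1 << 22)	/* assume 4 MiB chunks */

typedef struct { void *links[6]; } misc_elm_t;	/* stand-in element */

static size_t map_bias = 3;		/* assumed header pages */
static size_t map_misc_offset = 4096;	/* assumed misc array offset */

static size_t
miscelm_to_pageind(misc_elm_t *miscelm)
{
	/* Chunk-relative byte offset (chunks are chunksize-aligned)... */
	size_t offset = (uintptr_t)miscelm & (CHUNKSIZE - 1);

	/* ...minus the array base, scaled to an element index. */
	return ((offset - map_misc_offset) / sizeof(misc_elm_t) + map_bias);
}

int
main(void)
{
	uintptr_t chunk = CHUNKSIZE * 42;	/* fake aligned chunk base */
	misc_elm_t *elm = (misc_elm_t *)(chunk + map_misc_offset) + 5;

	printf("pageind = %zu\n", miscelm_to_pageind(elm));	/* 8 */
	return (0);
}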
JEMALLOC_INLINE_C size_t
arena_mapelm_to_bits(arena_chunk_map_t *mapelm)
arena_miscelm_to_bits(arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
return (mapelm->bits);
return arena_mapbits_get(chunk, pageind);
}
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
uintptr_t a_mapelm = (uintptr_t)a;
uintptr_t b_mapelm = (uintptr_t)b;
uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b;
assert(a != NULL);
assert(b != NULL);
return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
rb_link, arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
{
int ret;
size_t a_size;
size_t b_size = arena_mapelm_to_bits(b) & ~PAGE_MASK;
uintptr_t a_mapelm = (uintptr_t)a;
uintptr_t b_mapelm = (uintptr_t)b;
size_t b_size = arena_miscelm_to_bits(b) & ~PAGE_MASK;
uintptr_t a_miscelm = (uintptr_t)a;
uintptr_t b_miscelm = (uintptr_t)b;
if (a_mapelm & CHUNK_MAP_KEY)
a_size = a_mapelm & ~PAGE_MASK;
if (a_miscelm & CHUNK_MAP_KEY)
a_size = a_miscelm & ~PAGE_MASK;
else
a_size = arena_mapelm_to_bits(a) & ~PAGE_MASK;
a_size = arena_miscelm_to_bits(a) & ~PAGE_MASK;
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
if (!(a_mapelm & CHUNK_MAP_KEY))
ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
if (!(a_miscelm & CHUNK_MAP_KEY))
ret = (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm);
else {
/*
* Treat keys as if they are lower than anything else.
@@ -122,8 +124,8 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
}
/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
rb_link, arena_avail_comp)
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t,
arena_chunk_map_misc_t, rb_link, arena_avail_comp)
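
Note: arena_avail_comp also supports searching by size without constructing an element: arena_run_alloc_*_helper (below) encodes the requested size into a fake pointer tagged with CHUNK_MAP_KEY, and the comparator decodes it. A standalone sketch of this tagged-key pattern (the elm_t type and mask values are illustrative):

#include <stddef.h>
#include <stdint.h>

#define MAP_KEY		((uintptr_t)0x1U)	/* low bit tags a size key */
#define PAGE_MASK	((uintptr_t)0xFFFU)	/* assume 4 KiB pages */

typedef struct { size_t size; } elm_t;	/* stand-in for a real element */

/*
 * Order by run size, then by address.  A "key" is not a real element:
 * it is a size with the low bit set, cast to a pointer.  Real elements
 * are at least word-aligned, so the tag bit can never collide.  Keys
 * compare lower than real elements of equal size, so an nsearch
 * returns the lowest-addressed run of the smallest sufficient size.
 */
static int
avail_comp(elm_t *a, elm_t *b)
{
	uintptr_t a_elm = (uintptr_t)a, b_elm = (uintptr_t)b;
	size_t a_size, b_size = b->size & ~PAGE_MASK;
	int ret;

	if (a_elm & MAP_KEY)
		a_size = a_elm & ~PAGE_MASK;	/* decode size from the key */
	else
		a_size = a->size & ~PAGE_MASK;
	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		if (!(a_elm & MAP_KEY))
			ret = (a_elm > b_elm) - (a_elm < b_elm);
		else
			ret = -1;	/* keys sort below anything else */
	}
	return (ret);
}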
static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
@@ -132,7 +134,7 @@ arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
arena_avail_tree_insert(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
}
@@ -143,7 +145,7 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
arena_avail_tree_remove(&arena->runs_avail, arena_miscelm_get(chunk,
pageind));
}
@@ -151,14 +153,14 @@ static void
arena_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
CHUNK_MAP_DIRTY);
ql_elm_new(mapelm, dr_link);
ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
ql_elm_new(miscelm, dr_link);
ql_tail_insert(&arena->runs_dirty, miscelm, dr_link);
arena->ndirty += npages;
}
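
Note: runs_dirty is an intrusive list: the dr_link linkage lives inside the misc element itself, so tracking a dirty run requires no separate node allocation. A minimal standalone sketch of the same idea, without jemalloc's ql macros (hypothetical types; head insertion for brevity, where the real code appends at the tail):

#include <stddef.h>

typedef struct dirty_elm_s dirty_elm_t;
struct dirty_elm_s {
	dirty_elm_t	*dr_next;	/* embedded linkage, like dr_link */
	dirty_elm_t	*dr_prev;
	size_t		 npages;
};

typedef struct {
	dirty_elm_t	*head;		/* NULL when empty */
	size_t		 ndirty;	/* total dirty pages, like arena->ndirty */
} dirty_list_t;

static void
dirty_insert(dirty_list_t *list, dirty_elm_t *elm)
{
	elm->dr_prev = NULL;
	elm->dr_next = list->head;
	if (list->head != NULL)
		list->head->dr_prev = elm;
	list->head = elm;
	list->ndirty += elm->npages;
}

static void
dirty_remove(dirty_list_t *list, dirty_elm_t *elm)
{
	if (elm->dr_prev != NULL)
		elm->dr_prev->dr_next = elm->dr_next;
	else
		list->head = elm->dr_next;
	if (elm->dr_next != NULL)
		elm->dr_next->dr_prev = elm->dr_prev;
	list->ndirty -= elm->npages;
}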
@@ -166,13 +168,13 @@ static void
arena_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
CHUNK_MAP_DIRTY);
ql_remove(&arena->runs_dirty, mapelm, dr_link);
ql_remove(&arena->runs_dirty, miscelm, dr_link);
arena->ndirty -= npages;
}
@@ -532,16 +534,17 @@ arena_chunk_init_hard(arena_t *arena)
*/
if (zero == false) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
(void *)arena_mapp_get(chunk, map_bias+1),
(size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) -
(uintptr_t)arena_mapp_get(chunk, map_bias+1)));
(void *)arena_bitselm_get(chunk, map_bias+1),
(size_t)((uintptr_t) arena_bitselm_get(chunk,
chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_unzeroed_set(chunk, i, unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
map_bias+1)));
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
*)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
arena_bitselm_get(chunk, chunk_npages-1) -
(uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -641,14 +644,14 @@ static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run;
arena_chunk_map_t *mapelm;
arena_chunk_map_t *key;
arena_chunk_map_misc_t *miscelm;
arena_chunk_map_misc_t *key;
key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = arena_mapelm_to_pageind(mapelm);
key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
if (miscelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
@@ -695,14 +698,14 @@ static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
arena_run_t *run;
arena_chunk_map_t *mapelm;
arena_chunk_map_t *key;
arena_chunk_map_misc_t *miscelm;
arena_chunk_map_misc_t *key;
key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY);
mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = arena_mapelm_to_pageind(mapelm);
key = (arena_chunk_map_misc_t *)(size | CHUNK_MAP_KEY);
miscelm = arena_avail_tree_nsearch(&arena->runs_avail, key);
if (miscelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
@@ -769,13 +772,13 @@ static size_t
arena_dirty_count(arena_t *arena)
{
size_t ndirty = 0;
arena_chunk_map_t *mapelm;
arena_chunk_map_misc_t *miscelm;
arena_chunk_t *chunk;
size_t pageind, npages;
ql_foreach(mapelm, &arena->runs_dirty, dr_link) {
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = arena_mapelm_to_pageind(mapelm);
ql_foreach(miscelm, &arena->runs_dirty, dr_link) {
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
assert(arena_mapbits_allocated_get(chunk, pageind) == 0);
assert(arena_mapbits_large_get(chunk, pageind) == 0);
assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
@@ -808,16 +811,17 @@ arena_compute_npurge(arena_t *arena, bool all)
static size_t
arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
arena_chunk_mapelms_t *mapelms)
arena_chunk_miscelms_t *miscelms)
{
arena_chunk_map_t *mapelm;
arena_chunk_map_misc_t *miscelm;
size_t nstashed = 0;
/* Add at least npurge pages to purge_list. */
for (mapelm = ql_first(&arena->runs_dirty); mapelm != NULL;
mapelm = ql_first(&arena->runs_dirty)) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
size_t pageind = arena_mapelm_to_pageind(mapelm);
for (miscelm = ql_first(&arena->runs_dirty); miscelm != NULL;
miscelm = ql_first(&arena->runs_dirty)) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
size_t run_size = arena_mapbits_unallocated_size_get(chunk,
pageind);
size_t npages = run_size >> LG_PAGE;
@@ -838,8 +842,8 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
/* Append to purge_list for later processing. */
ql_elm_new(mapelm, dr_link);
ql_tail_insert(mapelms, mapelm, dr_link);
ql_elm_new(miscelm, dr_link);
ql_tail_insert(miscelms, miscelm, dr_link);
nstashed += npages;
@@ -851,10 +855,10 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
}
static size_t
arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
arena_purge_stashed(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
size_t npurged, nmadvise;
arena_chunk_map_t *mapelm;
arena_chunk_map_misc_t *miscelm;
if (config_stats)
nmadvise = 0;
@@ -862,13 +866,13 @@ arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
malloc_mutex_unlock(&arena->lock);
ql_foreach(mapelm, mapelms, dr_link) {
ql_foreach(miscelm, miscelms, dr_link) {
arena_chunk_t *chunk;
size_t pageind, run_size, npages, flag_unzeroed, i;
bool unzeroed;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = arena_mapelm_to_pageind(mapelm);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
run_size = arena_mapbits_large_size_get(chunk, pageind);
npages = run_size >> LG_PAGE;
@@ -908,18 +912,19 @@ arena_purge_stashed(arena_t *arena, arena_chunk_mapelms_t *mapelms)
}
static void
arena_unstash_purged(arena_t *arena, arena_chunk_mapelms_t *mapelms)
arena_unstash_purged(arena_t *arena, arena_chunk_miscelms_t *miscelms)
{
arena_chunk_map_t *mapelm;
arena_chunk_map_misc_t *miscelm;
/* Deallocate runs. */
for (mapelm = ql_first(mapelms); mapelm != NULL;
mapelm = ql_first(mapelms)) {
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
size_t pageind = arena_mapelm_to_pageind(mapelm);
for (miscelm = ql_first(miscelms); miscelm != NULL;
miscelm = ql_first(miscelms)) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)(pageind << LG_PAGE));
ql_remove(mapelms, mapelm, dr_link);
ql_remove(miscelms, miscelm, dr_link);
arena_run_dalloc(arena, run, false, true);
}
}
@@ -928,7 +933,7 @@ void
arena_purge(arena_t *arena, bool all)
{
size_t npurge, npurgeable, npurged;
arena_chunk_mapelms_t purge_list;
arena_chunk_miscelms_t purge_list;
if (config_debug) {
size_t ndirty = arena_dirty_count(arena);
@@ -1180,14 +1185,14 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
if (mapelm != NULL) {
arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
if (miscelm != NULL) {
arena_chunk_t *chunk;
size_t pageind;
arena_run_t *run;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = arena_mapelm_to_pageind(mapelm);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) <<
LG_PAGE));
@@ -1202,11 +1207,11 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
arena_run_tree_insert(&bin->runs, mapelm);
arena_run_tree_insert(&bin->runs, miscelm);
}
static void
@@ -1214,11 +1219,11 @@ arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
arena_run_tree_remove(&bin->runs, mapelm);
arena_run_tree_remove(&bin->runs, miscelm);
}
static arena_run_t *
@@ -1684,9 +1689,8 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t npages, run_ind, past;
assert(run != bin->runcur);
assert(arena_run_tree_search(&bin->runs,
arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
== NULL);
assert(arena_run_tree_search(&bin->runs, arena_miscelm_get(chunk,
((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) == NULL);
binind = arena_bin_index(chunk->arena, run->bin);
bin_info = &arena_bin_info[binind];
@@ -1749,7 +1753,7 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
arena_chunk_map_bits_t *bitselm)
{
size_t pageind;
arena_run_t *run;
@@ -1761,7 +1765,8 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind));
binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind));
bin_info = &arena_bin_info[binind];
if (config_fill || config_stats)
size = bin_info->reg_size;
@@ -1784,7 +1789,7 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm)
size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
arena_bin_t *bin;
@@ -1793,7 +1798,7 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
arena_dalloc_bin_locked(arena, chunk, ptr, bitselm);
malloc_mutex_unlock(&bin->lock);
}
@@ -1801,15 +1806,15 @@ void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind)
{
arena_chunk_map_t *mapelm;
arena_chunk_map_bits_t *bitselm;
if (config_debug) {
/* arena_ptr_small_binind_get() does extra sanity checking. */
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
bitselm = arena_bitselm_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
}
#ifdef JEMALLOC_JET
@@ -2268,7 +2273,8 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
* be twice as large in order to maintain alignment.
*/
if (config_fill && opt_redzone) {
size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1);
size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
1);
if (align_min <= REDZONE_MINSIZE) {
bin_info->redzone_size = REDZONE_MINSIZE;
pad_size = 0;
@@ -2404,13 +2410,17 @@ arena_boot(void)
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
header_size = offsetof(arena_chunk_t, map) +
(sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
header_size = offsetof(arena_chunk_t, map_bits) +
((sizeof(arena_chunk_map_bits_t) +
sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
!= 0);
}
assert(map_bias > 0);
map_misc_offset = offsetof(arena_chunk_t, map_bits) +
sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
arena_maxclass = chunksize - (map_bias << LG_PAGE);
bin_info_init();
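
Note: the boot-time sizing is circular: the number of header pages depends on how many pages need map entries, which in turn depends on the header pages; the loop reaches a fixed point within three iterations, and map_misc_offset then marks where the dense bits array ends. A self-contained sketch of the computation (the fixed header size and element sizes are assumed, plausible 64-bit values):

#include <stddef.h>
#include <stdio.h>

#define LG_PAGE		12
#define PAGE		((size_t)1 << LG_PAGE)
#define PAGE_MASK	(PAGE - 1)
#define CHUNK_NPAGES	((size_t)1024)	/* 4 MiB chunk, 4 KiB pages */
#define HDR_FIXED	((size_t)120)	/* assumed offsetof(..., map_bits) */
#define BITS_SZ		((size_t)8)	/* assumed bits element size */
#define MISC_SZ		((size_t)40)	/* assumed misc element size */

int
main(void)
{
	size_t map_bias = 0, header_size, map_misc_offset;
	int i;

	/* Fixed point: header pages <-> pages needing map entries. */
	for (i = 0; i < 3; i++) {
		header_size = HDR_FIXED + (BITS_SZ + MISC_SZ) *
		    (CHUNK_NPAGES - map_bias);
		map_bias = (header_size >> LG_PAGE) +
		    ((header_size & PAGE_MASK) != 0);
	}
	/* The misc array starts where the dense bits array ends. */
	map_misc_offset = HDR_FIXED + BITS_SZ * (CHUNK_NPAGES - map_bias);

	/* Prints map_bias = 12, map_misc_offset = 8216. */
	printf("map_bias = %zu, map_misc_offset = %zu\n", map_bias,
	    map_misc_offset);
	return (0);
}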


@@ -28,6 +28,7 @@ size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;
size_t map_bias;
size_t map_misc_offset;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/


@@ -118,10 +118,10 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
arena_mapp_get(chunk, pageind);
arena_chunk_map_bits_t *bitselm =
arena_bitselm_get(chunk, pageind);
arena_dalloc_bin_locked(arena, chunk, ptr,
mapelm);
bitselm);
} else {
/*
* This object was allocated via a different
@@ -393,9 +393,10 @@ tcache_destroy(tcache_t *tcache)
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
LG_PAGE;
arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
arena_chunk_map_bits_t *bitselm = arena_bitselm_get(chunk,
pageind);
arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
arena_dalloc_bin(arena, chunk, tcache, pageind, bitselm);
} else if (tcache_size <= tcache_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;