Remove arena_dalloc_bin_run() clean page preservation.

Remove code in arena_dalloc_bin_run() that preserved the "clean" state
of trailing clean pages by splitting them into a separate run during
deallocation.  This was a useful mechanism for reducing dirty page
churn when bin runs comprised many pages, but bin runs are now quite
small.

Remove the nextind field from arena_run_t now that it is no longer
needed, and change arena_run_t's bin field (arena_bin_t *) to binind
(index_t).  These two changes remove 8 bytes of chunk header overhead
per page, which saves 1/512 of all arena chunk memory.
commit 381c23dd9d
parent 81e547566e
Author: Jason Evans
Date:   2014-10-10 23:01:03 -07:00

2 changed files with 13 additions and 74 deletions

@@ -36,11 +36,8 @@ typedef struct arena_s arena_t;
 #ifdef JEMALLOC_H_STRUCTS
 
 struct arena_run_s {
-	/* Bin this run is associated with. */
-	arena_bin_t	*bin;
-
-	/* Index of next region that has never been allocated, or nregs. */
-	uint32_t	nextind;
+	/* Index of bin this run is associated with. */
+	index_t		binind;
 
 	/* Number of free regions in run. */
 	unsigned	nfree;
@@ -756,7 +753,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 		size_t rpages_ind;
 		arena_run_t *run;
 		arena_bin_t *bin;
-		index_t actual_binind;
+		index_t run_binind, actual_binind;
 		arena_bin_info_t *bin_info;
 		arena_chunk_map_misc_t *miscelm;
 		void *rpages;
@@ -774,9 +771,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 		    pageind);
 		miscelm = arena_miscelm_get(chunk, rpages_ind);
 		run = &miscelm->run;
-		bin = run->bin;
+		run_binind = run->binind;
+		bin = &arena->bins[run_binind];
 		actual_binind = bin - arena->bins;
-		assert(binind == actual_binind);
+		assert(run_binind == actual_binind);
 		bin_info = &arena_bin_info[actual_binind];
 		rpages = arena_miscelm_to_rpages(miscelm);
 		assert(((uintptr_t)ptr - ((uintptr_t)rpages +
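
The 1/512 figure in the commit message follows from simple arithmetic. Below is a minimal standalone sketch of it (not part of the commit), assuming 64-bit pointers, a 32-bit index_t, and the default 4 KiB page size; the struct layouts only mirror the fields named in the diff above.

/*
 * Illustrative only: old vs. new arena_run_t fields under the assumptions
 * stated above (64-bit pointers, 32-bit index_t, 4 KiB pages).
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t index_t;	/* assumed 32-bit */

struct old_run_fields {
	void		*bin;		/* was: arena_bin_t *bin */
	uint32_t	nextind;	/* removed by this commit */
	unsigned	nfree;
};

struct new_run_fields {
	index_t		binind;		/* replaces the bin pointer */
	unsigned	nfree;
};

int
main(void)
{
	/* The commit message counts one run header per page of chunk
	 * header overhead, so the per-page saving is the size delta. */
	size_t saved = sizeof(struct old_run_fields) -
	    sizeof(struct new_run_fields);
	size_t page = 4096;		/* assumed default page size */

	printf("saved %zu bytes per page: 1/%zu of arena chunk memory\n",
	    saved, page / saved);
	return (0);
}

With these assumptions the program prints "saved 8 bytes per page: 1/512 of arena chunk memory", matching the claim in the commit message.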