Add arena chunk map assertions.

commit 30fe12b866
parent 5b0c99649f
Author: Jason Evans
Date:   2012-05-10 17:09:17 -07:00

@@ -353,17 +353,20 @@ arena_chunk_alloc(arena_t *arena)
                 chunk = arena->spare;
                 arena->spare = NULL;
 
-                /* Insert the run into the appropriate runs_avail_* tree. */
-                if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
-                        runs_avail = &arena->runs_avail_clean;
-                else
-                        runs_avail = &arena->runs_avail_dirty;
+                assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+                assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
                 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
                     arena_maxclass);
                 assert(arena_mapbits_unallocated_size_get(chunk,
                     chunk_npages-1) == arena_maxclass);
                 assert(arena_mapbits_dirty_get(chunk, map_bias) ==
                     arena_mapbits_dirty_get(chunk, chunk_npages-1));
+
+                /* Insert the run into the appropriate runs_avail_* tree. */
+                if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
+                        runs_avail = &arena->runs_avail_clean;
+                else
+                        runs_avail = &arena->runs_avail_dirty;
                 arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
                     map_bias));
         } else {
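
The assertions added above rely on a bookkeeping convention that the rest of the commit also checks: an unallocated run records its state redundantly in the chunk map entries of both its first page (map_bias, for a whole-chunk run) and its last page (chunk_npages-1), and the two entries must agree on the allocated bit, the run size (arena_maxclass here), and the dirty flag. The following sketch is illustrative only: it models the invariant with a hypothetical unpacked struct, whereas jemalloc packs these fields into per-page map bits read through the arena_mapbits_*_get() accessors.

/*
 * Toy model (not jemalloc code) of the head/tail invariant asserted in
 * arena_chunk_alloc() and arena_chunk_dealloc() in this commit.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
        bool    allocated;      /* page belongs to an allocated run */
        bool    dirty;          /* page may contain previously touched memory */
        size_t  unalloc_size;   /* run size in bytes; valid when !allocated */
} toy_mapelm_t;

/* The same five conditions as the new asserts, over the toy map. */
static void
toy_check_whole_chunk_run(const toy_mapelm_t *map, size_t map_bias,
    size_t chunk_npages, size_t arena_maxclass)
{

        assert(map[map_bias].allocated == false);
        assert(map[chunk_npages-1].allocated == false);
        assert(map[map_bias].unalloc_size == arena_maxclass);
        assert(map[chunk_npages-1].unalloc_size == arena_maxclass);
        assert(map[map_bias].dirty == map[chunk_npages-1].dirty);
}

int
main(void)
{
        /* Toy chunk: four usable 4 KiB pages forming one clean run. */
        enum { MAP_BIAS = 0, CHUNK_NPAGES = 4, MAXCLASS = 4 * 4096 };
        toy_mapelm_t map[CHUNK_NPAGES] = {
                [MAP_BIAS] = {false, false, MAXCLASS},
                [CHUNK_NPAGES-1] = {false, false, MAXCLASS},
        };

        toy_check_whole_chunk_run(map, MAP_BIAS, CHUNK_NPAGES, MAXCLASS);
        return (0);
}

The next hunk adds the same head/tail checks to arena_chunk_dealloc(), which is presumably why the later hunk in arena_run_dalloc() can drop its whole-chunk asserts just before calling it.
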
@@ -427,6 +430,15 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
         arena_avail_tree_t *runs_avail;
 
+        assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+        assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+        assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+            arena_maxclass);
+        assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+            arena_maxclass);
+        assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+            arena_mapbits_dirty_get(chunk, chunk_npages-1));
+
         /*
          * Remove run from the appropriate runs_avail_* tree, so that the arena
          * does not use it.
@@ -578,6 +590,8 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
          */
         if (chunk == arena->spare) {
                 assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
+                assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+
                 arena_chunk_alloc(arena);
         }
 
@@ -590,7 +604,9 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
                         npages = arena_mapbits_unallocated_size_get(chunk,
                             pageind) >> LG_PAGE;
                         assert(pageind + npages <= chunk_npages);
-                        if (arena_mapbits_dirty_get(chunk, pageind)) {
+                        assert(arena_mapbits_dirty_get(chunk, pageind) ==
+                            arena_mapbits_dirty_get(chunk, pageind+npages-1));
+                        if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
                                 size_t i;
 
                                 arena_avail_tree_remove(
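
The purge hunk above checks the same property while walking the chunk map run by run: the run length is recovered from the unallocated size stored at the run's first page, and the dirty flag recorded there must match the one at the run's last page. Below is a rough, hypothetical sketch of that traversal over a toy map (a variant of the struct in the earlier sketch, storing the run length in pages); allocated pages are skipped one at a time purely to keep the toy simple.

/*
 * Toy traversal mirroring the endpoint assertions added to the purge
 * loop; the real code derives npages from
 * arena_mapbits_unallocated_size_get(chunk, pageind) >> LG_PAGE.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
        bool    allocated;
        bool    dirty;
        size_t  run_npages;     /* run length in pages; valid when !allocated */
} toy_mapelm_t;

static void
toy_walk_unallocated_runs(const toy_mapelm_t *map, size_t map_bias,
    size_t chunk_npages)
{
        size_t pageind, npages;

        for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
                if (map[pageind].allocated) {
                        npages = 1;     /* simplification; the real loop skips whole runs */
                        continue;
                }
                npages = map[pageind].run_npages;
                assert(npages > 0);
                assert(pageind + npages <= chunk_npages);
                /* Dirty state must agree at both ends of the run. */
                assert(map[pageind].dirty == map[pageind + npages - 1].dirty);
        }
}

int
main(void)
{
        /* Two unallocated runs: pages 0-1 dirty, pages 2-3 clean. */
        toy_mapelm_t map[4] = {
                {false, true, 2}, {false, true, 2},
                {false, false, 2}, {false, false, 2},
        };

        toy_walk_unallocated_runs(map, 0, 4);
        return (0);
}

The bound check and the endpoint dirty comparison come straight from the hunk; everything else in the sketch is scaffolding.
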
@@ -832,6 +848,8 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
          * The run is dirty if the caller claims to have dirtied it, as well as
          * if it was already dirty before being allocated.
          */
+        assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+            arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
         if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
                 dirty = true;
         flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
@@ -931,9 +949,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
         if (size == arena_maxclass) {
                 assert(run_ind == map_bias);
                 assert(run_pages == (arena_maxclass >> LG_PAGE));
-                assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
-                assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
-                    arena_maxclass);
                 arena_chunk_dealloc(arena, chunk);
         }
 
@@ -1514,16 +1529,16 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
          * trim the clean pages before deallocating the dirty portion of the
          * run.
          */
+        assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+            arena_mapbits_dirty_get(chunk, run_ind+npages-1));
         if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
             npages) {
-                /*
-                 * Trim clean pages.  Convert to large run beforehand.  Set the
-                 * last map element first, in case this is a one-page run.
-                 */
-                arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
-                    arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
+                /* Trim clean pages.  Convert to large run beforehand. */
+                assert(npages > 0);
                 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
                     arena_mapbits_unzeroed_get(chunk, run_ind));
+                arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
+                    arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
                 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
                     ((past - run_ind) << LG_PAGE), false);
                 /* npages = past - run_ind; */