Fix arena chunk purge/dealloc race conditions.

Fix arena_chunk_dealloc() to put the new spare in a consistent state before
dropping the arena mutex to deallocate the previous spare.

Fix arena_run_dalloc() to insert a newly dirtied chunk into the
chunks_dirty list before potentially deallocating the chunk, so that dirty
page accounting is self-consistent.
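
As an illustration of the two orderings described above, here is a small, compilable C sketch. Its types and function names (chunk_t, arena_t, dirty_list_remove(), chunk_retire(), run_free_dirty()) are simplified stand-ins invented for this example, not jemalloc's real arena_chunk_t/arena_t or arena_chunk_dealloc()/arena_run_dalloc(); it only shows the lock and accounting discipline, under the assumption that the expensive deallocation is the reason the arena lock is dropped.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdlib.h>

	typedef struct chunk_s chunk_t;
	struct chunk_s {
		chunk_t	*link_dirty;	/* next chunk on the arena's dirty list */
		size_t	ndirty;		/* dirty pages in this chunk */
		bool	dirtied;	/* currently on the dirty list? */
	};

	typedef struct {
		pthread_mutex_t	lock;
		chunk_t		*spare;		/* cached fully unused chunk, or NULL */
		chunk_t		*chunks_dirty;	/* singly linked list of dirty chunks */
		size_t		ndirty;		/* total dirty pages in the arena */
	} arena_t;

	/* Unlink a chunk from the arena's dirty list (lock held). */
	static void
	dirty_list_remove(arena_t *arena, chunk_t *chunk)
	{
		chunk_t **p;

		for (p = &arena->chunks_dirty; *p != NULL; p = &(*p)->link_dirty) {
			if (*p == chunk) {
				*p = chunk->link_dirty;
				break;
			}
		}
		chunk->dirtied = false;
	}

	/*
	 * First fix: install the new spare and finish all dirty-page accounting
	 * for the old spare *before* dropping the lock, so no other thread can
	 * observe a half-updated arena while the old spare is being freed.
	 */
	static void
	chunk_retire(arena_t *arena, chunk_t *chunk)
	{
		chunk_t *old_spare = arena->spare;	/* lock held on entry */

		arena->spare = chunk;
		if (old_spare != NULL) {
			if (old_spare->dirtied) {
				dirty_list_remove(arena, old_spare);
				arena->ndirty -= old_spare->ndirty;
			}
			/* Arena state is consistent; the slow free may now run unlocked. */
			pthread_mutex_unlock(&arena->lock);
			free(old_spare);
			pthread_mutex_lock(&arena->lock);
		}
	}

	/*
	 * Second fix: put a newly dirtied chunk on chunks_dirty *before* it can
	 * be retired, so the removal in chunk_retire() always balances this
	 * insertion and the arena-wide dirty-page count stays self-consistent.
	 */
	static void
	run_free_dirty(arena_t *arena, chunk_t *chunk, size_t run_pages, bool now_empty)
	{
		chunk->ndirty += run_pages;		/* lock held on entry */
		arena->ndirty += run_pages;
		if (!chunk->dirtied) {
			chunk->link_dirty = arena->chunks_dirty;
			arena->chunks_dirty = chunk;
			chunk->dirtied = true;
		}
		if (now_empty)
			chunk_retire(arena, chunk);
		/* A purge pass, if the dirty threshold were exceeded, would go here. */
	}

	int
	main(void)
	{
		arena_t arena = { .lock = PTHREAD_MUTEX_INITIALIZER };
		chunk_t *a = calloc(1, sizeof(*a));
		chunk_t *b = calloc(1, sizeof(*b));

		if (a == NULL || b == NULL)
			return (1);
		pthread_mutex_lock(&arena.lock);
		run_free_dirty(&arena, a, 4, true);	/* a becomes the spare */
		run_free_dirty(&arena, b, 2, true);	/* b replaces a; a is freed unlocked */
		pthread_mutex_unlock(&arena.lock);
		free(arena.spare);
		return (0);
	}

The design point in both cases is the same: every piece of arena bookkeeping (the spare pointer, chunks_dirty membership, ndirty) is brought to a consistent state while the lock is still held, and the lock is released only around the slow deallocation itself.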
Author: Jason Evans
Date:   2010-04-13 20:53:21 -07:00
parent 5065156f3f
commit 8d4203c72d


@@ -470,23 +470,6 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 	arena_avail_tree_t *runs_avail;
-	while (arena->spare != NULL) {
-		arena_chunk_t *spare = arena->spare;
-		arena->spare = NULL;
-		if (spare->dirtied) {
-			ql_remove(&chunk->arena->chunks_dirty, spare,
-			    link_dirty);
-			arena->ndirty -= spare->ndirty;
-		}
-		malloc_mutex_unlock(&arena->lock);
-		chunk_dealloc((void *)spare, chunksize);
-		malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
-		arena->stats.mapped -= chunksize;
-#endif
-	}
 	/*
 	 * Remove run from the appropriate runs_avail_* tree, so that the arena
 	 * does not use it.
@@ -499,6 +482,22 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 	arena_avail_tree_remove(runs_avail,
 	    &chunk->map[arena_chunk_header_npages]);
-	arena->spare = chunk;
+	if (arena->spare != NULL) {
+		arena_chunk_t *spare = arena->spare;
+		arena->spare = chunk;
+		if (spare->dirtied) {
+			ql_remove(&chunk->arena->chunks_dirty, spare,
+			    link_dirty);
+			arena->ndirty -= spare->ndirty;
+		}
+		malloc_mutex_unlock(&arena->lock);
+		chunk_dealloc((void *)spare, chunksize);
+		malloc_mutex_lock(&arena->lock);
+#ifdef JEMALLOC_STATS
+		arena->stats.mapped -= chunksize;
+#endif
+	} else
+		arena->spare = chunk;
 }
@@ -925,6 +924,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	/* Insert into runs_avail, now that coalescing is complete. */
 	arena_avail_tree_insert(runs_avail, &chunk->map[run_ind]);
+	if (dirty) {
+		/*
+		 * Insert into chunks_dirty before potentially calling
+		 * arena_chunk_dealloc(), so that chunks_dirty and
+		 * arena->ndirty are consistent.
+		 */
+		if (chunk->dirtied == false) {
+			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
+			chunk->dirtied = true;
+		}
+	}
 	/*
 	 * Deallocate chunk if it is now completely unused. The bit
 	 * manipulation checks whether the first run is unallocated and extends
@@ -935,20 +946,15 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 		arena_chunk_dealloc(arena, chunk);
 	/*
-	 * It is okay to do dirty page processing even if the chunk was
+	 * It is okay to do dirty page processing here even if the chunk was
 	 * deallocated above, since in that case it is the spare. Waiting
 	 * until after possible chunk deallocation to do dirty processing
 	 * allows for an old spare to be fully deallocated, thus decreasing the
 	 * chances of spuriously crossing the dirty page purging threshold.
 	 */
-	if (dirty) {
-		if (chunk->dirtied == false) {
-			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
-			chunk->dirtied = true;
-		}
+	if (dirty)
 		arena_maybe_purge(arena);
-	}
 }
 static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,