Fix arena chunk purge/dealloc race conditions.
Fix arena_chunk_dealloc() to put the new spare in a consistent state before dropping the arena mutex to deallocate the previous spare.

Fix arena_run_dalloc() to insert a newly dirtied chunk into the chunks_dirty list before potentially deallocating the chunk, so that dirty page accounting is self-consistent.
parent 5065156f3f
commit 8d4203c72d
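To see what the first two hunks are protecting against, here is a minimal sketch of the locking pattern the fix establishes. The types and helper names (chunk_t, arena_chunk_retire, chunk_unmap) are simplified stand-ins invented for illustration, not jemalloc's actual code; the point is only the ordering of the state update relative to dropping the lock.

/*
 * Minimal sketch of the fixed ordering, using invented stand-in types.
 * jemalloc's real arena_chunk_dealloc() also maintains the chunks_dirty
 * list and runs_avail trees; those details are omitted here.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

typedef struct {
	size_t	ndirty;		/* dirty pages in this chunk */
	bool	dirtied;	/* is the chunk on the arena's dirty list? */
} chunk_t;

typedef struct {
	pthread_mutex_t	lock;
	chunk_t		*spare;		/* one cached, fully unused chunk */
	size_t		ndirty;		/* arena-wide dirty page count */
} arena_t;

/* Stand-in for chunk_dealloc(): slow, so it runs without the lock. */
static void
chunk_unmap(chunk_t *chunk)
{
	free(chunk);
}

/* Retire |chunk| as the new spare; called with arena->lock held. */
static void
arena_chunk_retire(arena_t *arena, chunk_t *chunk)
{
	chunk_t *old_spare = arena->spare;

	/*
	 * Install the new spare and fix the dirty accounting before the
	 * lock is dropped, so that any thread acquiring the lock while
	 * the old spare is being unmapped sees a consistent arena.
	 */
	arena->spare = chunk;
	if (old_spare != NULL) {
		if (old_spare->dirtied)
			arena->ndirty -= old_spare->ndirty;
		pthread_mutex_unlock(&arena->lock);
		chunk_unmap(old_spare);
		pthread_mutex_lock(&arena->lock);
	}
}

The ordering being removed below did the opposite: it released the mutex to deallocate the old spare before the replacement spare and its accounting were fully in place, so another thread could observe the arena mid-update.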
@@ -470,23 +470,6 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 {
 	arena_avail_tree_t *runs_avail;
 
-	while (arena->spare != NULL) {
-		arena_chunk_t *spare = arena->spare;
-
-		arena->spare = NULL;
-		if (spare->dirtied) {
-			ql_remove(&chunk->arena->chunks_dirty, spare,
-			    link_dirty);
-			arena->ndirty -= spare->ndirty;
-		}
-		malloc_mutex_unlock(&arena->lock);
-		chunk_dealloc((void *)spare, chunksize);
-		malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
-		arena->stats.mapped -= chunksize;
-#endif
-	}
-
 	/*
 	 * Remove run from the appropriate runs_avail_* tree, so that the arena
 	 * does not use it.
@@ -499,7 +482,23 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 	arena_avail_tree_remove(runs_avail,
 	    &chunk->map[arena_chunk_header_npages]);
 
-	arena->spare = chunk;
+	if (arena->spare != NULL) {
+		arena_chunk_t *spare = arena->spare;
+
+		arena->spare = chunk;
+		if (spare->dirtied) {
+			ql_remove(&chunk->arena->chunks_dirty, spare,
+			    link_dirty);
+			arena->ndirty -= spare->ndirty;
+		}
+		malloc_mutex_unlock(&arena->lock);
+		chunk_dealloc((void *)spare, chunksize);
+		malloc_mutex_lock(&arena->lock);
+#ifdef JEMALLOC_STATS
+		arena->stats.mapped -= chunksize;
+#endif
+	} else
+		arena->spare = chunk;
 }
 
 static arena_run_t *
@@ -925,6 +924,18 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 	/* Insert into runs_avail, now that coalescing is complete. */
 	arena_avail_tree_insert(runs_avail, &chunk->map[run_ind]);
 
+	if (dirty) {
+		/*
+		 * Insert into chunks_dirty before potentially calling
+		 * arena_chunk_dealloc(), so that chunks_dirty and
+		 * arena->ndirty are consistent.
+		 */
+		if (chunk->dirtied == false) {
+			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
+			chunk->dirtied = true;
+		}
+	}
+
 	/*
 	 * Deallocate chunk if it is now completely unused. The bit
 	 * manipulation checks whether the first run is unallocated and extends
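The hunk above exists to keep chunks_dirty and arena->ndirty in agreement at every point where the lock may be released. The invariant can be written down as a check; the helper below is a hypothetical debug aid over simplified, invented types, not code from the patch.

/* Hypothetical consistency check over invented toy types. */
#include <assert.h>
#include <stddef.h>

typedef struct toy_chunk_s {
	struct toy_chunk_s	*link_dirty;	/* next chunk on the dirty list */
	size_t			ndirty;		/* dirty pages in this chunk */
} toy_chunk_t;

typedef struct {
	toy_chunk_t	*chunks_dirty;	/* head of the dirty-chunk list */
	size_t		ndirty;		/* cached arena-wide total */
} toy_arena_t;

/* The cached total must equal the sum over the dirty-chunk list. */
static void
assert_dirty_accounting(const toy_arena_t *arena)
{
	size_t total = 0;

	for (const toy_chunk_t *c = arena->chunks_dirty; c != NULL;
	    c = c->link_dirty)
		total += c->ndirty;
	assert(total == arena->ndirty);
}

Because the newly dirtied chunk is inserted before arena_chunk_dealloc() can run, a check of this form would hold both before and after the possible deallocation of the old spare.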
@@ -935,19 +946,14 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
 		arena_chunk_dealloc(arena, chunk);
 
 	/*
-	 * It is okay to do dirty page processing even if the chunk was
+	 * It is okay to do dirty page processing here even if the chunk was
 	 * deallocated above, since in that case it is the spare. Waiting
 	 * until after possible chunk deallocation to do dirty processing
 	 * allows for an old spare to be fully deallocated, thus decreasing the
 	 * chances of spuriously crossing the dirty page purging threshold.
 	 */
-	if (dirty) {
-		if (chunk->dirtied == false) {
-			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
-			chunk->dirtied = true;
-		}
+	if (dirty)
 		arena_maybe_purge(arena);
-	}
 }
 
 static void
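The comment in the last hunk is about ordering relative to the purge threshold. Assuming a purge policy of the general form "purge once ndirty exceeds some maximum" (the names below are assumptions made for illustration, not jemalloc's actual policy), the benefit of deallocating the old spare first is that its dirty pages have already been subtracted by the time the threshold is tested:

/* Illustrative only; the threshold and names are assumed, not jemalloc's. */
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	size_t	ndirty;		/* current dirty pages */
	size_t	dirty_max;	/* purge once this is exceeded */
} purge_arena_t;

static bool
toy_should_purge(const purge_arena_t *arena)
{
	return (arena->ndirty > arena->dirty_max);
}

/*
 * Ordering used by the patch: release the old spare (which lowers
 * ndirty) before testing the threshold, so pages that are about to be
 * unmapped anyway cannot push the arena over the limit.
 */
static bool
toy_dalloc_then_check(purge_arena_t *arena, size_t spare_ndirty)
{
	arena->ndirty -= spare_ndirty;	/* old spare fully deallocated */
	return (toy_should_purge(arena));
}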