From 4f929aa94853ecd7da2791f462d1b972ee66db8e Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Mon, 22 Apr 2013 22:36:18 -0700
Subject: [PATCH] Fix another deadlock related to chunk_record().

Fix chunk_record() to unlock chunks_mtx before deallocating a base node,
in order to avoid potential deadlock.  This fix addresses the second of
two similar bugs.
---
 src/chunk.c | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/chunk.c b/src/chunk.c
index e8fc473c..aef3fede 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -214,7 +214,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
     size_t size)
 {
 	bool unzeroed;
-	extent_node_t *xnode, *node, *prev, key;
+	extent_node_t *xnode, *node, *prev, *xprev, key;
 
 	unzeroed = pages_purge(chunk, size);
 	VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -226,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 	 * held.
 	 */
 	xnode = base_node_alloc();
+	/* Use xprev to implement conditional deferred deallocation of prev. */
+	xprev = NULL;
 
 	malloc_mutex_lock(&chunks_mtx);
 	key.addr = (void *)((uintptr_t)chunk + size);
@@ -280,18 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
 		node->zeroed = (node->zeroed && prev->zeroed);
 		extent_tree_szad_insert(chunks_szad, node);
 
-		base_node_dealloc(prev);
+		xprev = prev;
 	}
 
 label_return:
 	malloc_mutex_unlock(&chunks_mtx);
-	if (xnode != NULL) {
-		/*
-		 * Deallocate xnode after unlocking chunks_mtx in order to
-		 * avoid potential deadlock.
-		 */
+	/*
+	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+	 * avoid potential deadlock.
+	 */
+	if (xnode != NULL)
 		base_node_dealloc(xnode);
-	}
+	if (xprev != NULL)
+		base_node_dealloc(xprev);
 }
 
 void