Fix arenas_cache_cleanup() and arena_get_hard().

Fix arenas_cache_cleanup() and arena_get_hard() to handle
allocation/deallocation within the application's thread-specific data
cleanup functions even after arenas_cache is torn down.

This is a more general fix that complements
45e9f66c28 (Fix arenas_cache_cleanup().).
Author: Jason Evans
Date:   2015-08-25 16:13:59 -07:00
parent 5d2e875ac9
commit 30949da601

2 changed files with 8 additions and 9 deletions
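The scenario being fixed, in concrete terms: POSIX does not order thread-specific-data destructors across keys, so an application's destructor may run after jemalloc's own cleanup has already torn down the thread's arenas_cache, and any malloc()/free() it performs must still behave correctly. A minimal sketch of such a destructor, assuming only the standard pthread TSD API (the app_* names are hypothetical):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t app_key;

/*
 * This destructor may run after jemalloc's own TSD cleanup has already
 * destroyed this thread's arenas_cache.  The malloc()/free() calls here
 * must still work, which forces arena_get_hard() down its slow path.
 */
static void
app_tsd_cleanup(void *data)
{
	void *scratch = malloc(64);

	free(scratch);
	free(data);
}

static void
app_init(void)
{
	/* Threads attach their data via pthread_setspecific(app_key, ...). */
	pthread_key_create(&app_key, app_tsd_cleanup);
}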

--- a/ChangeLog
+++ b/ChangeLog

@@ -7,9 +7,9 @@ brevity. Much more detail can be found in the git revision history:
 * 4.0.1 (XXX)
 
   Bug fixes:
-  - Fix arenas_cache_cleanup() to handle allocation/deallocation within the
-    application's thread-specific data cleanup functions even after
-    arenas_cache is torn down.
+  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+    allocation/deallocation within the application's thread-specific data
+    cleanup functions even after arenas_cache is torn down.
   - Don't bitshift by negative amounts when encoding/decoding run sizes in chunk
     header maps. This affected systems with page sizes greater than 8 KiB.
   - Rename index_t to szind_t to avoid an existing type on Solaris.

--- a/src/jemalloc.c
+++ b/src/jemalloc.c

@@ -510,17 +510,17 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 		assert(ind < narenas_actual || !init_if_missing);
 		narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
-		if (!*arenas_cache_bypassp) {
+		if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
 			*arenas_cache_bypassp = true;
 			arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
 			    narenas_cache);
 			*arenas_cache_bypassp = false;
-		} else {
+		} else
 			arenas_cache = NULL;
-		}
 		if (arenas_cache == NULL) {
 			/*
 			 * This function must always tell the truth, even if
-			 * it's slow, so don't let OOM or recursive allocation
+			 * it's slow, so don't let OOM, thread cleanup (note
+			 * tsd_nominal check), nor recursive allocation
 			 * avoidance (note arenas_cache_bypass check) get in the
 			 * way.
 			 */
@@ -531,6 +531,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 			malloc_mutex_unlock(&arenas_lock);
 			return (arena);
 		}
+		assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
 		tsd_arenas_cache_set(tsd, arenas_cache);
 		tsd_narenas_cache_set(tsd, narenas_cache);
 	}
@@ -650,8 +651,6 @@ arenas_cache_cleanup(tsd_t *tsd)
 
 	arenas_cache = tsd_arenas_cache_get(tsd);
 	if (arenas_cache != NULL) {
-		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
-		*arenas_cache_bypassp = true;
 		tsd_arenas_cache_set(tsd, NULL);
 		a0dalloc(arenas_cache);
 	}
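Taken together, the hunks above reduce to one guard pattern: refill the per-thread cache only while the thread's tsd is still nominal and no refill is already in flight (the bypass flag), and otherwise fall back to the slow but always-truthful lookup. A self-contained sketch of that pattern, with illustrative names rather than jemalloc's internals:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define NSLOTS	64

/* Illustrative per-thread state, mirroring tsd_nominal / arenas_cache_bypass. */
static _Thread_local bool tls_nominal = true;	/* false once thread cleanup begins */
static _Thread_local bool tls_bypass = false;	/* true while a refill is in flight */
static _Thread_local int *tls_cache = NULL;

/* Stand-in for reading the global table under its lock. */
static int
slow_lookup(unsigned ind)
{
	return ((int)ind);
}

/* Caller guarantees ind < NSLOTS. */
int
lookup_hard(unsigned ind)
{
	if (tls_cache == NULL) {
		int *cache = NULL;

		if (tls_nominal && !tls_bypass) {
			tls_bypass = true;	/* malloc() may re-enter lookup_hard() */
			cache = malloc(NSLOTS * sizeof(int));
			tls_bypass = false;
		}
		if (cache == NULL) {
			/* OOM, re-entry, or thread teardown: truthful, just slow. */
			return (slow_lookup(ind));
		}
		for (unsigned i = 0; i < NSLOTS; i++)
			cache[i] = slow_lookup(i);
		tls_cache = cache;
	}
	return (tls_cache[ind]);
}

Once a thread is past nominal (cleanup has begun) the cache is simply never re-created, which is what lets arenas_cache_cleanup() in the last hunk drop its bypass bookkeeping.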