Fix arenas_cache_cleanup() and arena_get_hard().
Fix arenas_cache_cleanup() and arena_get_hard() to handle
allocation/deallocation within the application's thread-specific data
cleanup functions even after arenas_cache is torn down.
This is a more general fix that complements 45e9f66c28 (Fix arenas_cache_cleanup().).
commit 30949da601
parent 5d2e875ac9
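The scenario being handled is easiest to see from the application side. Below is a minimal, hypothetical repro sketch (not part of the commit; the key name, sizes, and cleanup body are made up for illustration): POSIX does not order application TSD destructors relative to jemalloc's, so an application destructor that allocates or frees may run after jemalloc has already torn down the thread's arenas_cache.

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_key_t key;	/* hypothetical application TSD key */

	static void
	app_cleanup(void *arg)
	{
		/*
		 * This destructor may run after jemalloc's own TSD cleanup,
		 * so these calls can reach arena_get_hard() with a tsd that
		 * is no longer nominal.  Before this fix, that path could
		 * install a fresh arenas_cache into the dying tsd, which
		 * would then never be cleaned up.
		 */
		free(arg);
		free(malloc(64));
	}

	int
	main(void)
	{
		pthread_key_create(&key, app_cleanup);
		pthread_setspecific(key, malloc(32));
		pthread_exit(NULL);	/* TSD destructors run during thread exit */
	}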
ChangeLog
@@ -7,9 +7,9 @@ brevity. Much more detail can be found in the git revision history:
 * 4.0.1 (XXX)
 
   Bug fixes:
-  - Fix arenas_cache_cleanup() to handle allocation/deallocation within the
-    application's thread-specific data cleanup functions even after
-    arenas_cache is torn down.
+  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+    allocation/deallocation within the application's thread-specific data
+    cleanup functions even after arenas_cache is torn down.
   - Don't bitshift by negative amounts when encoding/decoding run sizes in chunk
     header maps. This affected systems with page sizes greater than 8 KiB.
   - Rename index_t to szind_t to avoid an existing type on Solaris.
src/jemalloc.c
@@ -510,17 +510,17 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 	assert(ind < narenas_actual || !init_if_missing);
 	narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
 
-	if (!*arenas_cache_bypassp) {
+	if (tsd_nominal(tsd) && !*arenas_cache_bypassp) {
 		*arenas_cache_bypassp = true;
 		arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
 		    narenas_cache);
 		*arenas_cache_bypassp = false;
-	} else
-		arenas_cache = NULL;
+	}
 	if (arenas_cache == NULL) {
 		/*
 		 * This function must always tell the truth, even if
-		 * it's slow, so don't let OOM or recursive allocation
+		 * it's slow, so don't let OOM, thread cleanup (note
+		 * tsd_nominal check), nor recursive allocation
 		 * avoidance (note arenas_cache_bypass check) get in the
 		 * way.
 		 */
@@ -531,6 +531,7 @@ arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
 		malloc_mutex_unlock(&arenas_lock);
 		return (arena);
 	}
+	assert(tsd_nominal(tsd) && !*arenas_cache_bypassp);
 	tsd_arenas_cache_set(tsd, arenas_cache);
 	tsd_narenas_cache_set(tsd, narenas_cache);
 }
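The gate added above depends on the tsd lifecycle: a thread's tsd is nominal while the thread is live and leaves that state once thread exit begins, which is what lets arena_get_hard() decide whether caching into the tsd is still safe. The following is a simplified paraphrase with assumed names, not the exact definitions; the real ones live in jemalloc's internal tsd headers.

	#include <stdbool.h>

	/* Simplified paraphrase of the tsd state machine assumed here. */
	typedef enum {
		tsd_state_uninitialized,	/* No tsd yet for this thread. */
		tsd_state_nominal,		/* Live thread; caching is safe. */
		tsd_state_purgatory,		/* Thread exit in progress. */
		tsd_state_reincarnated		/* tsd used after cleanup already ran. */
	} tsd_state_t;

	typedef struct {
		tsd_state_t state;
		/* ... per-thread fields such as arenas_cache elided ... */
	} tsd_t;

	static bool
	tsd_nominal(tsd_t *tsd)
	{
		/* Only a nominal tsd may have new state cached into it. */
		return (tsd->state == tsd_state_nominal);
	}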
@@ -650,8 +651,6 @@ arenas_cache_cleanup(tsd_t *tsd)
 
 	arenas_cache = tsd_arenas_cache_get(tsd);
 	if (arenas_cache != NULL) {
-		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
-		*arenas_cache_bypassp = true;
 		tsd_arenas_cache_set(tsd, NULL);
 		a0dalloc(arenas_cache);
 	}
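With arena_get_hard() refusing to (re)install a cache unless tsd_nominal(tsd) holds, arenas_cache_cleanup() no longer has to set the bypass flag itself: an allocation made from a later application TSD destructor takes the slow, non-caching path instead of planting a fresh arenas_cache in the torn-down tsd. That is why the bypass lines introduced by 45e9f66c28 can be dropped here, and it is exactly the condition the new assertion ahead of tsd_arenas_cache_set() verifies.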