Fix chunk_alloc_cache() to support decommitted allocation.

Fix chunk_alloc_cache() to support decommitted allocation, and use this
ability in arena_chunk_alloc_internal() and arena_stash_dirty(), so that
chunks don't get permanently stuck in a hybrid state. This resolves #487.
commit e9012630ac
parent dd3ed23aea
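The substance of the change is a new in/out bool *commit parameter on
chunk_alloc_cache(), the same convention chunk_alloc_wrapper() already
follows: on entry, *commit says whether the caller needs committed
(physically backed) pages; on return, it reports the actual state of the
chunk handed out, so a decommitted cached chunk can be reused as-is. The
standalone C sketch below is illustrative only, not jemalloc code (the
one-slot cache and names such as toy_alloc_cache are invented); it models
the contract, using mmap/mprotect to stand in for reserve/commit.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

#define CHUNK	((size_t)2 << 20)	/* 2 MiB, a chunk-sized range. */

/* One-slot stand-in for the arena's cache of retained chunks. */
static void *cache_chunk;
static size_t cache_size;
static bool cache_committed;

/*
 * In/out commit contract: on entry, *commit says whether the caller needs
 * backed (committed) pages; on return, it reports the chunk's actual state.
 */
static void *
toy_alloc_cache(size_t size, bool *commit)
{
	void *ret;
	bool committed;

	if (cache_chunk == NULL || cache_size != size)
		return (NULL);
	ret = cache_chunk;
	committed = cache_committed;
	cache_chunk = NULL;

	/* Commit on demand, only when the caller requires it. */
	if (*commit && !committed) {
		if (mprotect(ret, size, PROT_READ | PROT_WRITE) != 0) {
			cache_chunk = ret;	/* Put it back. */
			return (NULL);
		}
		committed = true;
	}
	*commit = committed;	/* Out-param: the state actually delivered. */
	return (ret);
}

int
main(void)
{
	bool commit;
	void *chunk;

	/* Seed the cache with reserved-but-decommitted address space. */
	cache_chunk = mmap(NULL, CHUNK, PROT_NONE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	assert(cache_chunk != MAP_FAILED);
	cache_size = CHUNK;
	cache_committed = false;

	/* A purge-style caller accepts the chunk decommitted. */
	commit = false;
	chunk = toy_alloc_cache(CHUNK, &commit);
	assert(chunk != NULL && !commit);

	/* Return it to the cache; an allocation-style caller commits it. */
	cache_chunk = chunk;
	commit = true;
	chunk = toy_alloc_cache(CHUNK, &commit);
	assert(chunk != NULL && commit);
	memset(chunk, 0xa5, CHUNK);	/* Pages are backed now. */

	munmap(chunk, CHUNK);
	return (0);
}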
include/jemalloc/internal/chunk.h

@@ -58,7 +58,7 @@ void chunk_deregister(const void *chunk, const extent_node_t *node);
 void *chunk_alloc_base(size_t size);
 void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool dalloc_node);
+    bool *zero, bool *commit, bool dalloc_node);
 void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
     bool *zero, bool *commit);
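This makes chunk_alloc_cache() symmetric with chunk_alloc_wrapper()
directly below it, which already took bool *commit.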
src/arena.c (16 lines changed)
@@ -30,6 +30,8 @@ unsigned nhclasses; /* Number of huge size classes. */
  * definition.
  */

+static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
+    arena_chunk_t *chunk);
 static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
     size_t ndirty_limit);
 static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
@@ -579,14 +581,13 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
-	    chunksize, zero, true);
+	    chunksize, zero, commit, true);
 	if (chunk != NULL) {
 		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
 			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
 			    chunksize, true);
 			return (NULL);
 		}
-		*commit = true;
 	}
 	if (chunk == NULL) {
 		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
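Two things happen in this hunk: the caller's commit flag is passed down to
chunk_alloc_cache(), and the unconditional *commit = true on the success
path is deleted. arena_chunk_alloc_internal() now reports the chunk's real
commit state instead of asserting one, which is how recycled chunks stop
getting stuck in the hybrid state the commit message describes.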
@@ -883,6 +884,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	void *ret;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	size_t csize = CHUNK_CEILING(usize);
+	bool commit = true;

 	malloc_mutex_lock(tsdn, &arena->lock);

@@ -894,7 +896,7 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 	arena_nactive_add(arena, usize >> LG_PAGE);

 	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
-	    alignment, zero, true);
+	    alignment, zero, &commit, true);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (ret == NULL) {
 		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
@@ -1004,6 +1006,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
 	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
 	size_t udiff = usize - oldsize;
 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+	bool commit = true;

 	malloc_mutex_lock(tsdn, &arena->lock);

@@ -1015,7 +1018,7 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
 	arena_nactive_add(arena, udiff >> LG_PAGE);

 	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-	    chunksize, zero, true) == NULL);
+	    chunksize, zero, &commit, true) == NULL);
 	malloc_mutex_unlock(tsdn, &arena->lock);
 	if (err) {
 		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
@@ -1512,7 +1515,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

 		if (rdelm == &chunkselm->rd) {
 			extent_node_t *chunkselm_next;
-			bool zero;
+			bool zero, commit;
 			UNUSED void *chunk;

 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
@@ -1526,10 +1529,11 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 			 * dalloc_node=false argument to chunk_alloc_cache().
 			 */
 			zero = false;
+			commit = false;
 			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
 			    extent_node_addr_get(chunkselm),
 			    extent_node_size_get(chunkselm), chunksize, &zero,
-			    false);
+			    &commit, false);
 			assert(chunk == extent_node_addr_get(chunkselm));
 			assert(zero == extent_node_zeroed_get(chunkselm));
 			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
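arena_stash_dirty() is the other caller the message names. It reclaims a
cached chunk only in order to purge it, so it now passes commit = false and
accepts the chunk in whatever commit state it happens to be in, rather than
forcing a commit first.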
src/chunk.c

@@ -385,23 +385,21 @@ chunk_alloc_base(size_t size)

 void *
 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
-    void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool dalloc_node)
 {
 	void *ret;
-	bool commit;

 	assert(size != 0);
 	assert((size & chunksize_mask) == 0);
 	assert(alignment != 0);
 	assert((alignment & chunksize_mask) == 0);

-	commit = true;
 	ret = chunk_recycle(tsdn, arena, chunk_hooks,
 	    &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
-	    new_addr, size, alignment, zero, &commit, dalloc_node);
+	    new_addr, size, alignment, zero, commit, dalloc_node);
 	if (ret == NULL)
 		return (NULL);
-	assert(commit);
 	if (config_valgrind)
 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	return (ret);
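With callers supplying the flag, the hard-coded commit = true and the
assert(commit) that previously ruled out decommitted cached chunks are both
gone; chunk_alloc_cache() now passes the caller's flag straight through to
chunk_recycle().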