Allow chunks to not be naturally aligned.
Precisely size extents for huge size classes that aren't multiples of chunksize.
parent 741967e79d
commit 4731cd47f7
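In broad terms, this commit drops the requirement that huge extents be chunk-aligned and chunk-sized: sa2u() now only rounds the requested alignment up to a page boundary, and the arena allocates exactly usize bytes instead of CHUNK_CEILING(usize). The following standalone sketch (not jemalloc source; the 4 KiB page and 2 MiB chunk values are assumptions for illustration) contrasts the two policies for one example request:

/*
 * Minimal standalone sketch (not jemalloc code) contrasting the old and new
 * sizing policy for a huge request; PAGE and CHUNKSIZE are assumed values.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE      ((size_t)4096)
#define CHUNKSIZE ((size_t)(2 * 1024 * 1024))

/* Round s up to a multiple of align (align must be a power of two). */
static size_t
ceil_to(size_t s, size_t align)
{
    return ((s + align - 1) & ~(align - 1));
}

int
main(void)
{
    size_t usize = 5 * 1024 * 1024 + PAGE;  /* an example huge request */

    /* Before: huge extents were chunk-aligned and chunk-sized. */
    printf("old: size=%zu align=%zu\n", ceil_to(usize, CHUNKSIZE), CHUNKSIZE);
    /* After: extents are precisely usize bytes and only page-aligned. */
    printf("new: size=%zu align=%zu\n", usize, PAGE);
    return (0);
}

The practical effect is that a huge allocation slightly larger than a chunk multiple no longer ties up the rest of a trailing chunk.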
@@ -486,8 +486,6 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
 extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
     size_t usize, size_t alignment, bool *zero);
 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
@@ -90,6 +90,8 @@ ph_proto(, extent_heap_, extent_heap_t, extent_t)
 arena_t *extent_arena_get(const extent_t *extent);
 void *extent_addr_get(const extent_t *extent);
 size_t extent_size_get(const extent_t *extent);
+void *extent_before_get(const extent_t *extent);
+void *extent_last_get(const extent_t *extent);
 void *extent_past_get(const extent_t *extent);
 bool extent_active_get(const extent_t *extent);
 bool extent_dirty_get(const extent_t *extent);
@@ -137,6 +139,20 @@ extent_size_get(const extent_t *extent)
     return (extent->e_size);
 }
 
+JEMALLOC_INLINE void *
+extent_before_get(const extent_t *extent)
+{
+
+    return ((void *)(uintptr_t)extent->e_addr - PAGE);
+}
+
+JEMALLOC_INLINE void *
+extent_last_get(const extent_t *extent)
+{
+
+    return ((void *)(uintptr_t)extent->e_addr + extent->e_size - PAGE);
+}
 
 JEMALLOC_INLINE void *
 extent_past_get(const extent_t *extent)
 {
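The helpers added above are what the rest of the commit uses to find an extent's page-granularity neighbors (for rtree lookups and coalescing) now that neighbors are no longer chunk-aligned. A standalone illustration of the addresses they compute (not jemalloc source; the 4 KiB page is an assumed value):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((size_t)4096)

int
main(void)
{
    uintptr_t addr = 0x100000;      /* assumed extent base address */
    size_t size = 3 * PAGE;         /* assumed extent size */

    printf("before = %#lx\n", (unsigned long)(addr - PAGE));        /* extent_before_get */
    printf("last   = %#lx\n", (unsigned long)(addr + size - PAGE)); /* extent_last_get */
    printf("past   = %#lx\n", (unsigned long)(addr + size));        /* extent_past_get */
    return (0);
}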
@@ -797,14 +797,14 @@ sa2u(size_t size, size_t alignment)
         return (usize);
     }
 
-    /* Try for a large size class. */
-    if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
-        /*
-         * We can't achieve subpage alignment, so round up alignment
-         * to the minimum that can actually be supported.
-         */
-        alignment = PAGE_CEILING(alignment);
+    /*
+     * We can't achieve subpage alignment, so round up alignment to the
+     * minimum that can actually be supported.
+     */
+    alignment = PAGE_CEILING(alignment);
 
+    /* Try for a large size class. */
+    if (likely(size <= large_maxclass) && likely(alignment == PAGE)) {
         /* Make sure result is a large size class. */
         usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
 
@@ -821,12 +821,6 @@ sa2u(size_t size, size_t alignment)
     if (unlikely(alignment > HUGE_MAXCLASS))
         return (0);
 
-    /*
-     * We can't achieve subchunk alignment, so round up alignment to the
-     * minimum that can actually be supported.
-     */
-    alignment = CHUNK_CEILING(alignment);
-
     /* Make sure result is a huge size class. */
     if (size <= chunksize)
         usize = chunksize;
@@ -839,7 +833,7 @@ sa2u(size_t size, size_t alignment)
     }
 
     /*
-     * Calculate the multi-chunk mapping that huge_palloc() would need in
+     * Calculate the multi-page mapping that huge_palloc() would need in
      * order to guarantee the alignment.
      */
     if (usize + alignment < usize) {
@@ -19,7 +19,6 @@ arena_chunk_cache_maybe_remove
 arena_chunk_dalloc_huge
 arena_chunk_ralloc_huge_expand
 arena_chunk_ralloc_huge_shrink
-arena_chunk_ralloc_huge_similar
 arena_cleanup
 arena_dalloc
 arena_dalloc_bin
@@ -211,6 +210,7 @@ extent_addr_set
 extent_alloc
 extent_arena_get
 extent_arena_set
+extent_before_get
 extent_committed_get
 extent_committed_set
 extent_dalloc
@@ -219,6 +219,7 @@ extent_dirty_insert
 extent_dirty_remove
 extent_dirty_set
 extent_init
+extent_last_get
 extent_past_get
 extent_prof_tctx_get
 extent_prof_tctx_set
src/arena.c | 41
@@ -653,7 +653,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     malloc_mutex_unlock(tsdn, &arena->lock);
 
     extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
-        chunksize, zero, commit, true);
+        PAGE, zero, commit, true);
     if (extent != NULL && !*commit) {
         /* Commit header. */
         if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, extent, 0,
@@ -676,7 +676,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        chunksize, chunksize, zero, true);
+        chunksize, PAGE, zero, true);
     if (extent != NULL)
         *commit = true;
     if (extent == NULL) {
@@ -892,13 +892,12 @@ arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
 
 static extent_t *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
-    size_t csize)
+    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero)
 {
     extent_t *extent;
     bool commit = true;
 
-    extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+    extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, usize,
         alignment, zero, &commit, false);
     if (extent == NULL) {
         /* Revert optimistic stats updates. */
|
|||||||
{
|
{
|
||||||
extent_t *extent;
|
extent_t *extent;
|
||||||
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
|
||||||
size_t csize = CHUNK_CEILING(usize);
|
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->lock);
|
malloc_mutex_lock(tsdn, &arena->lock);
|
||||||
|
|
||||||
@ -932,11 +930,11 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
|
|||||||
arena_nactive_add(arena, usize >> LG_PAGE);
|
arena_nactive_add(arena, usize >> LG_PAGE);
|
||||||
|
|
||||||
extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
|
extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
|
||||||
csize, alignment, zero, false);
|
usize, alignment, zero, false);
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||||
if (extent == NULL) {
|
if (extent == NULL) {
|
||||||
extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
|
extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
|
||||||
usize, alignment, zero, csize);
|
usize, alignment, zero);
|
||||||
}
|
}
|
||||||
|
|
||||||
return (extent);
|
return (extent);
|
||||||
@ -954,32 +952,10 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
|
|||||||
}
|
}
|
||||||
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
|
arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
|
||||||
|
|
||||||
if ((extent_size_get(extent) & chunksize_mask) != 0)
|
|
||||||
extent_size_set(extent, CHUNK_CEILING(extent_size_get(extent)));
|
|
||||||
|
|
||||||
arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
|
arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
malloc_mutex_unlock(tsdn, &arena->lock);
|
||||||
}
|
}
|
||||||
|
|
||||||
void
|
|
||||||
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
|
||||||
size_t oldsize)
|
|
||||||
{
|
|
||||||
size_t usize = extent_size_get(extent);
|
|
||||||
|
|
||||||
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
|
|
||||||
assert(oldsize != usize);
|
|
||||||
|
|
||||||
malloc_mutex_lock(tsdn, &arena->lock);
|
|
||||||
if (config_stats)
|
|
||||||
arena_huge_ralloc_stats_update(arena, oldsize, usize);
|
|
||||||
if (oldsize < usize)
|
|
||||||
arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
|
|
||||||
else
|
|
||||||
arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
|
|
||||||
malloc_mutex_unlock(tsdn, &arena->lock);
|
|
||||||
}
|
|
||||||
|
|
||||||
void
|
void
|
||||||
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
|
||||||
size_t oldsize)
|
size_t oldsize)
|
||||||
@ -1501,8 +1477,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
zero = false;
|
zero = false;
|
||||||
extent = arena_chunk_cache_alloc_locked(tsdn, arena,
|
extent = arena_chunk_cache_alloc_locked(tsdn, arena,
|
||||||
chunk_hooks, extent_addr_get(chunkselm),
|
chunk_hooks, extent_addr_get(chunkselm),
|
||||||
extent_size_get(chunkselm), chunksize, &zero,
|
extent_size_get(chunkselm), PAGE, &zero, false);
|
||||||
false);
|
|
||||||
assert(extent == chunkselm);
|
assert(extent == chunkselm);
|
||||||
assert(zero == extent_zeroed_get(chunkselm));
|
assert(zero == extent_zeroed_get(chunkselm));
|
||||||
extent_dirty_insert(chunkselm, purge_runs_sentinel,
|
extent_dirty_insert(chunkselm, purge_runs_sentinel,
|
||||||
@@ -2641,7 +2616,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     if (likely(usize <= large_maxclass)) {
         ret = arena_palloc_large(tsdn, arena, usize, alignment,
             zero);
-    } else if (likely(alignment <= chunksize))
+    } else if (likely(alignment <= PAGE))
         ret = huge_malloc(tsdn, arena, usize, zero);
     else
         ret = huge_palloc(tsdn, arena, usize, alignment, zero);
@@ -57,7 +57,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
     {
         bool zero = true;
         bool commit = true;
-        addr = chunk_alloc_mmap(NULL, csize, chunksize, &zero, &commit);
+        addr = chunk_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
     }
     if (addr == NULL) {
         if (extent != NULL)
src/chunk.c | 57
@@ -58,8 +58,7 @@ static void chunk_record(tsdn_t *tsdn, arena_t *arena,
 static void
 extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
 {
-    size_t psz =
-        extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
+    size_t psz = extent_size_quantize_floor(extent_size_get(extent));
     pszind_t pind = psz2ind(psz);
     extent_heap_insert(&extent_heaps[pind], extent);
 }
@@ -67,8 +66,7 @@ extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
 static void
 extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
 {
-    size_t psz =
-        extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
+    size_t psz = extent_size_quantize_floor(extent_size_get(extent));
     pszind_t pind = psz2ind(psz);
     extent_heap_remove(&extent_heaps[pind], extent);
 }
@@ -169,11 +167,9 @@ extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
     assert(*r_elm_a != NULL);
 
     if (extent_size_get(extent) > PAGE) {
-        uintptr_t last =
-            (CHUNK_CEILING((uintptr_t)extent_past_get(extent)) - PAGE);
-
-        *r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last,
-            dependent, init_missing);
+        *r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree,
+            (uintptr_t)extent_last_get(extent), dependent,
+            init_missing);
         if (!dependent && *r_elm_b == NULL)
             return (true);
         assert(*r_elm_b != NULL);
|
|||||||
{
|
{
|
||||||
pszind_t pind, i;
|
pszind_t pind, i;
|
||||||
|
|
||||||
assert(size == CHUNK_CEILING(size));
|
|
||||||
|
|
||||||
pind = psz2ind(extent_size_quantize_ceil(size));
|
pind = psz2ind(extent_size_quantize_ceil(size));
|
||||||
for (i = pind; i < NPSIZES; i++) {
|
for (i = pind; i < NPSIZES; i++) {
|
||||||
extent_t *extent = extent_heap_first(&extent_heaps[i]);
|
extent_t *extent = extent_heap_first(&extent_heaps[i]);
|
||||||
@@ -326,9 +320,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *extent;
     size_t alloc_size, leadsize, trailsize;
 
-    assert(new_addr == NULL || alignment == chunksize);
-
-    alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+    alloc_size = s2u(size + alignment - PAGE);
     /* Beware size_t wrap-around. */
     if (alloc_size < size)
         return (NULL);
|
|||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
assert(size != 0);
|
assert(size != 0);
|
||||||
assert((size & chunksize_mask) == 0);
|
|
||||||
assert(alignment != 0);
|
assert(alignment != 0);
|
||||||
assert((alignment & chunksize_mask) == 0);
|
|
||||||
|
|
||||||
/* "primary" dss. */
|
/* "primary" dss. */
|
||||||
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
if (have_dss && dss_prec == dss_prec_primary && (ret =
|
||||||
@ -472,9 +462,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
|
|||||||
bool commit;
|
bool commit;
|
||||||
|
|
||||||
assert(size != 0);
|
assert(size != 0);
|
||||||
assert((size & chunksize_mask) == 0);
|
|
||||||
assert(alignment != 0);
|
assert(alignment != 0);
|
||||||
assert((alignment & chunksize_mask) == 0);
|
|
||||||
|
|
||||||
commit = true;
|
commit = true;
|
||||||
extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
|
extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
|
||||||
@@ -525,9 +513,7 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *extent;
 
     assert(size != 0);
-    assert((size & chunksize_mask) == 0);
     assert(alignment != 0);
-    assert((alignment & chunksize_mask) == 0);
 
     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
         false, new_addr, size, alignment, zero, commit, slab);
@@ -551,8 +537,10 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
         return (NULL);
     addr = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
         arena->ind);
-    if (addr == NULL)
+    if (addr == NULL) {
+        extent_dalloc(tsdn, arena, extent);
         return (NULL);
+    }
     extent_init(extent, arena, addr, size, true, false, zero, commit, slab);
     if (chunk_register(tsdn, extent)) {
         chunk_leak(tsdn, arena, chunk_hooks, false, extent);
@@ -585,9 +573,6 @@ static bool
 chunk_can_coalesce(const extent_t *a, const extent_t *b)
 {
 
-    assert((void *)CHUNK_CEILING((uintptr_t)extent_past_get(a)) ==
-        extent_addr_get(b));
-
     if (extent_arena_get(a) != extent_arena_get(b))
         return (false);
     if (extent_active_get(a) != extent_active_get(b))
|
|||||||
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
malloc_mutex_lock(tsdn, &arena->chunks_mtx);
|
||||||
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
|
chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
|
||||||
|
|
||||||
assert((extent_size_get(extent) & chunksize_mask) == 0);
|
|
||||||
extent_active_set(extent, false);
|
extent_active_set(extent, false);
|
||||||
extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
|
extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
|
||||||
if (extent_slab_get(extent)) {
|
if (extent_slab_get(extent)) {
|
||||||
@@ -651,7 +635,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     /* Try to coalesce forward. */
     next = rtree_read(tsdn, &chunks_rtree,
-        CHUNK_CEILING((uintptr_t)extent_past_get(extent)), false);
+        (uintptr_t)extent_past_get(extent), false);
     if (next != NULL) {
         chunk_try_coalesce(tsdn, arena, chunk_hooks, extent, next,
             extent_heaps, cache);
@@ -659,7 +643,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     /* Try to coalesce backward. */
     prev = rtree_read(tsdn, &chunks_rtree,
-        (uintptr_t)extent_addr_get(extent) - PAGE, false);
+        (uintptr_t)extent_before_get(extent), false);
     if (prev != NULL) {
         chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
             extent_heaps, cache);
@@ -675,7 +659,6 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     assert(extent_addr_get(extent) != NULL);
     assert(extent_size_get(extent) != 0);
-    assert((extent_size_get(extent) & chunksize_mask) == 0);
 
     extent_zeroed_set(extent, false);
 
@@ -700,7 +683,6 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     assert(extent_addr_get(extent) != NULL);
     assert(extent_size_get(extent) != 0);
-    assert((extent_size_get(extent) & chunksize_mask) == 0);
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     /* Try to deallocate. */
@@ -788,8 +770,7 @@ chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     return (chunk_hooks->purge(extent_addr_get(extent),
-        CHUNK_CEILING(extent_size_get(extent)), offset, length,
-        arena->ind));
+        extent_size_get(extent), offset, length, arena->ind));
 }
 
 static bool
@@ -809,9 +790,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *trail;
     rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
 
-    assert(CHUNK_CEILING(size_a) == size_a);
-    assert(CHUNK_CEILING(extent_size_get(extent)) == size_a +
-        CHUNK_CEILING(size_b));
+    assert(extent_size_get(extent) == size_a + size_b);
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 
@@ -833,16 +812,15 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     }
 
     extent_init(trail, arena, (void *)((uintptr_t)extent_addr_get(extent) +
-        size_a), CHUNK_CEILING(size_b), extent_active_get(extent),
+        size_a), size_b, extent_active_get(extent),
         extent_dirty_get(extent), extent_zeroed_get(extent),
         extent_committed_get(extent), extent_slab_get(extent));
     if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
         &trail_elm_b))
         goto label_error_c;
 
-    if (chunk_hooks->split(extent_addr_get(extent), size_a +
-        CHUNK_CEILING(size_b), size_a, CHUNK_CEILING(size_b),
-        extent_committed_get(extent), arena->ind))
+    if (chunk_hooks->split(extent_addr_get(extent), size_a + size_b, size_a,
+        size_b, extent_committed_get(extent), arena->ind))
         goto label_error_d;
 
     extent_size_set(extent, size_a);
@@ -886,9 +864,6 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 {
     rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
 
-    assert((extent_size_get(a) & chunksize_mask) == 0);
-    assert((extent_size_get(b) & chunksize_mask) == 0);
-
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
         extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
@@ -69,9 +69,12 @@ void *
 chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     size_t alignment, bool *zero, bool *commit)
 {
+    void *ret;
+    extent_t *pad;
+
     cassert(have_dss);
-    assert(size > 0 && (size & chunksize_mask) == 0);
-    assert(alignment > 0 && (alignment & chunksize_mask) == 0);
+    assert(size > 0);
+    assert(alignment > 0);
 
     /*
      * sbrk() uses a signed increment argument, so take care not to
@@ -80,19 +83,22 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     if ((intptr_t)size < 0)
         return (NULL);
 
+    pad = extent_alloc(tsdn, arena);
+    if (pad == NULL)
+        return (NULL);
+
     malloc_mutex_lock(tsdn, &dss_mtx);
     if (dss_prev != (void *)-1) {
         /*
          * The loop is necessary to recover from races with other
          * threads that are using the DSS for something other than
          * malloc.
          */
-        do {
-            void *ret, *cpad_addr, *dss_next;
-            extent_t *cpad;
-            size_t gap_size, cpad_size;
+        while (true) {
+            void *pad_addr, *dss_next;
+            size_t pad_size;
             intptr_t incr;
 
             /* Avoid an unnecessary system call. */
             if (new_addr != NULL && dss_max != new_addr)
                 break;
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Calculate how much padding is necessary to
|
* Compute how much pad space (if any) is necessary to
|
||||||
* chunk-align the end of the DSS.
|
* satisfy alignment. This space can be recycled for
|
||||||
|
* later use.
|
||||||
*/
|
*/
|
||||||
gap_size = (chunksize - ALIGNMENT_ADDR2OFFSET(dss_max,
|
pad_addr = (void *)((uintptr_t)dss_max);
|
||||||
chunksize)) & chunksize_mask;
|
|
||||||
/*
|
|
||||||
* Compute how much chunk-aligned pad space (if any) is
|
|
||||||
* necessary to satisfy alignment. This space can be
|
|
||||||
* recycled for later use.
|
|
||||||
*/
|
|
||||||
cpad_addr = (void *)((uintptr_t)dss_max + gap_size);
|
|
||||||
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
|
ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
|
||||||
alignment);
|
alignment);
|
||||||
cpad_size = (uintptr_t)ret - (uintptr_t)cpad_addr;
|
pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
|
||||||
if (cpad_size != 0) {
|
if (pad_size != 0) {
|
||||||
cpad = extent_alloc(tsdn, arena);
|
extent_init(pad, arena, pad_addr, pad_size,
|
||||||
if (cpad == NULL) {
|
|
||||||
malloc_mutex_unlock(tsdn, &dss_mtx);
|
|
||||||
return (NULL);
|
|
||||||
}
|
|
||||||
extent_init(cpad, arena, cpad_addr, cpad_size,
|
|
||||||
false, true, false, true, false);
|
false, true, false, true, false);
|
||||||
}
|
}
|
||||||
dss_next = (void *)((uintptr_t)ret + size);
|
dss_next = (void *)((uintptr_t)ret + size);
|
||||||
if ((uintptr_t)ret < (uintptr_t)dss_max ||
|
if ((uintptr_t)ret < (uintptr_t)dss_max ||
|
||||||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
|
(uintptr_t)dss_next < (uintptr_t)dss_max)
|
||||||
/* Wrap-around. */
|
break; /* Wrap-around. */
|
||||||
malloc_mutex_unlock(tsdn, &dss_mtx);
|
incr = pad_size + size;
|
||||||
return (NULL);
|
|
||||||
}
|
|
||||||
incr = gap_size + cpad_size + size;
|
|
||||||
dss_prev = chunk_dss_sbrk(incr);
|
dss_prev = chunk_dss_sbrk(incr);
|
||||||
|
if (dss_prev == (void *)-1)
|
||||||
|
break;
|
||||||
if (dss_prev == dss_max) {
|
if (dss_prev == dss_max) {
|
||||||
/* Success. */
|
/* Success. */
|
||||||
dss_max = dss_next;
|
dss_max = dss_next;
|
||||||
malloc_mutex_unlock(tsdn, &dss_mtx);
|
malloc_mutex_unlock(tsdn, &dss_mtx);
|
||||||
if (cpad_size != 0) {
|
if (pad_size != 0) {
|
||||||
chunk_hooks_t chunk_hooks =
|
chunk_hooks_t chunk_hooks =
|
||||||
CHUNK_HOOKS_INITIALIZER;
|
CHUNK_HOOKS_INITIALIZER;
|
||||||
chunk_dalloc_wrapper(tsdn, arena,
|
chunk_dalloc_wrapper(tsdn, arena,
|
||||||
&chunk_hooks, cpad);
|
&chunk_hooks, pad);
|
||||||
}
|
} else
|
||||||
|
extent_dalloc(tsdn, arena, pad);
|
||||||
if (*zero)
|
if (*zero)
|
||||||
memset(ret, 0, size);
|
memset(ret, 0, size);
|
||||||
if (!*commit)
|
if (!*commit)
|
||||||
*commit = pages_decommit(ret, size);
|
*commit = pages_decommit(ret, size);
|
||||||
return (ret);
|
return (ret);
|
||||||
}
|
}
|
||||||
} while (dss_prev != (void *)-1);
|
}
|
||||||
}
|
}
|
||||||
|
/* OOM. */
|
||||||
malloc_mutex_unlock(tsdn, &dss_mtx);
|
malloc_mutex_unlock(tsdn, &dss_mtx);
|
||||||
|
extent_dalloc(tsdn, arena, pad);
|
||||||
return (NULL);
|
return (NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
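With chunk alignment gone, chunk_alloc_dss() above no longer needs a separate gap to chunk-align the break: the single pad extent simply covers the span from the current break to the alignment-rounded return address, and sbrk() grows by the pad plus the request. A rough standalone sketch of that arithmetic (the break address, request size, and alignment below are made-up inputs, and the macro is defined locally for illustration):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Round s up to a multiple of align (power of two). */
#define ALIGNMENT_CEILING(s, align) (((s) + ((align) - 1)) & ~((align) - 1))

int
main(void)
{
    uintptr_t dss_max = 0x700000003000;   /* assumed current break */
    size_t size = 3 * 4096;               /* requested bytes */
    size_t alignment = 16 * 4096;         /* requested alignment */

    uintptr_t ret = ALIGNMENT_CEILING(dss_max, alignment);
    size_t pad_size = ret - dss_max;      /* recycled as a separate extent */
    intptr_t incr = pad_size + size;      /* what sbrk() is asked to grow by */

    printf("ret=%#lx pad=%zu incr=%ld\n", (unsigned long)ret, pad_size,
        (long)incr);
    return (0);
}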
@@ -50,7 +50,6 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
      */
 
     assert(alignment != 0);
-    assert((alignment & chunksize_mask) == 0);
 
     ret = pages_map(new_addr, size, commit);
     if (ret == NULL || ret == new_addr)
src/huge.c | 145
@@ -9,7 +9,7 @@ huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
 
     assert(usize == s2u(usize));
 
-    return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+    return (huge_palloc(tsdn, arena, usize, PAGE, zero));
 }
 
 void *
@@ -20,14 +20,11 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     extent_t *extent;
     bool is_zeroed;
 
-    /* Allocate one or more contiguous chunks for this request. */
-
     assert(!tsdn_null(tsdn) || arena != NULL);
 
     ausize = sa2u(usize, alignment);
     if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
         return (NULL);
-    assert(ausize >= chunksize);
 
     /*
      * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
@@ -40,9 +37,6 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
         arena, usize, alignment, &is_zeroed)) == NULL)
         return (NULL);
 
-    if (usize < extent_size_get(extent))
-        extent_size_set(extent, usize);
-
     /* Insert extent into huge. */
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     ql_elm_new(extent, ql_link);
@@ -86,80 +80,20 @@ huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
 huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
-static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
-    size_t usize_max, bool zero)
-{
-    size_t usize, usize_next;
-    arena_t *arena = extent_arena_get(extent);
-    size_t oldsize = extent_size_get(extent);
-    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-    bool pre_zeroed, post_zeroed;
-
-    /* Increase usize to incorporate extra. */
-    for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
-        <= oldsize; usize = usize_next)
-        ; /* Do nothing. */
-
-    if (oldsize == usize)
-        return;
-
-    pre_zeroed = extent_zeroed_get(extent);
-
-    /* Fill if necessary (shrinking). */
-    if (oldsize > usize) {
-        size_t sdiff = oldsize - usize;
-        if (config_fill && unlikely(opt_junk_free)) {
-            memset((void *)((uintptr_t)extent_addr_get(extent) +
-                usize), JEMALLOC_FREE_JUNK, sdiff);
-            post_zeroed = false;
-        } else {
-            post_zeroed = !chunk_purge_wrapper(tsdn, arena,
-                &chunk_hooks, extent, usize, sdiff);
-        }
-    } else
-        post_zeroed = pre_zeroed;
-
-    /* Update the size of the huge allocation. */
-    assert(extent_size_get(extent) != usize);
-    malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    extent_size_set(extent, usize);
-    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-    /* Update zeroed. */
-    extent_zeroed_set(extent, post_zeroed);
-
-    arena_chunk_ralloc_huge_similar(tsdn, arena, extent, oldsize);
-
-    /* Fill if necessary (growing). */
-    if (oldsize < usize) {
-        if (zero || (config_fill && unlikely(opt_zero))) {
-            if (!pre_zeroed) {
-                memset((void *)
-                    ((uintptr_t)extent_addr_get(extent) +
-                    oldsize), 0, usize - oldsize);
-            }
-        } else if (config_fill && unlikely(opt_junk_alloc)) {
-            memset((void *)((uintptr_t)extent_addr_get(extent) +
-                oldsize), JEMALLOC_ALLOC_JUNK, usize - oldsize);
-        }
-    }
-}
-
 static bool
 huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
 {
     arena_t *arena = extent_arena_get(extent);
     size_t oldsize = extent_size_get(extent);
     chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-    size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
-    size_t sdiff = CHUNK_CEILING(usize) - usize;
+    size_t diff = oldsize - usize;
 
     assert(oldsize > usize);
 
-    /* Split excess chunks. */
-    if (cdiff != 0) {
+    /* Split excess pages. */
+    if (diff != 0) {
         extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
-            extent, CHUNK_CEILING(usize), cdiff);
+            extent, usize, diff);
         if (trail == NULL)
             return (true);
 
@@ -171,28 +105,6 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
         arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks, trail);
     }
 
-    /* Optionally fill trailing subchunk. */
-    if (sdiff != 0) {
-        bool post_zeroed;
-
-        if (config_fill && unlikely(opt_junk_free)) {
-            huge_dalloc_junk(tsdn,
-                (void *)((uintptr_t)extent_addr_get(extent) +
-                usize), sdiff);
-            post_zeroed = false;
-        } else {
-            post_zeroed = !chunk_purge_wrapper(tsdn, arena,
-                &chunk_hooks, extent, usize, sdiff);
-
-            if (config_fill && unlikely(opt_zero) && !post_zeroed) {
-                memset((void *)
-                    ((uintptr_t)extent_addr_get(extent) +
-                    usize), 0, sdiff);
-            }
-        }
-        extent_zeroed_set(extent, post_zeroed);
-    }
-
     arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldsize);
 
     return (false);
@@ -204,20 +116,18 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 {
     arena_t *arena = extent_arena_get(extent);
     size_t oldsize = extent_size_get(extent);
-    bool is_zeroed_subchunk = extent_zeroed_get(extent);
-    bool is_zeroed_chunk = false;
+    bool is_zeroed_trail = false;
     chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
-    void *nchunk =
-        (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
+    size_t trailsize = usize - oldsize;
     extent_t *trail;
 
-    if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks, nchunk,
-        cdiff, chunksize, &is_zeroed_chunk)) == NULL) {
+    if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks,
+        extent_past_get(extent), trailsize, PAGE, &is_zeroed_trail)) ==
+        NULL) {
         bool commit = true;
         if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
-            nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit, false))
-            == NULL)
+            extent_past_get(extent), trailsize, PAGE, &is_zeroed_trail,
+            &commit, false)) == NULL)
             return (true);
     }
 
@@ -227,23 +137,15 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
     }
 
     if (zero || (config_fill && unlikely(opt_zero))) {
-        if (!is_zeroed_subchunk) {
+        if (!is_zeroed_trail) {
             memset((void *)((uintptr_t)extent_addr_get(extent) +
-                oldsize), 0, CHUNK_CEILING(oldsize) - oldsize);
-        }
-        if (!is_zeroed_chunk) {
-            memset((void *)((uintptr_t)extent_addr_get(extent) +
-                CHUNK_CEILING(oldsize)), 0, usize -
-                CHUNK_CEILING(oldsize));
+                oldsize), 0, usize - oldsize);
         }
     } else if (config_fill && unlikely(opt_junk_alloc)) {
         memset((void *)((uintptr_t)extent_addr_get(extent) + oldsize),
             JEMALLOC_ALLOC_JUNK, usize - oldsize);
     }
 
-    if (usize < extent_size_get(extent))
-        extent_size_set(extent, usize);
-
     arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldsize);
 
     return (false);
@@ -260,7 +162,7 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
     /* Both allocation sizes must be huge to avoid a move. */
     assert(extent_size_get(extent) >= chunksize && usize_max >= chunksize);
 
-    if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(extent_size_get(extent))) {
+    if (usize_max > extent_size_get(extent)) {
         /* Attempt to expand the allocation in-place. */
         if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max,
             zero)) {
@@ -268,9 +170,9 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
             return (false);
         }
         /* Try again, this time with usize_min. */
-        if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
-            CHUNK_CEILING(extent_size_get(extent)) &&
-            huge_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) {
+        if (usize_min < usize_max && usize_min > extent_size_get(extent)
+            && huge_ralloc_no_move_expand(tsdn, extent, usize_min,
+            zero)) {
             arena_decay_tick(tsdn, extent_arena_get(extent));
             return (false);
         }
@@ -280,17 +182,14 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
     * Avoid moving the allocation if the existing chunk size accommodates
     * the new size.
     */
-    if (CHUNK_CEILING(extent_size_get(extent)) >= CHUNK_CEILING(usize_min)
-        && CHUNK_CEILING(extent_size_get(extent)) <=
-        CHUNK_CEILING(usize_max)) {
-        huge_ralloc_no_move_similar(tsdn, extent, usize_min, usize_max,
-            zero);
+    if (extent_size_get(extent) >= usize_min && extent_size_get(extent) <=
+        usize_max) {
         arena_decay_tick(tsdn, extent_arena_get(extent));
         return (false);
     }
 
     /* Attempt to shrink the allocation in-place. */
-    if (CHUNK_CEILING(extent_size_get(extent)) > CHUNK_CEILING(usize_max)) {
+    if (extent_size_get(extent) > usize_max) {
         if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
             arena_decay_tick(tsdn, extent_arena_get(extent));
             return (false);
@@ -304,7 +203,7 @@ huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero)
 {
 
-    if (alignment <= chunksize)
+    if (alignment <= PAGE)
         return (huge_malloc(tsdn, arena, usize, zero));
     return (huge_palloc(tsdn, arena, usize, alignment, zero));
 }
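Because huge extents now carry their exact size, huge_ralloc_no_move() above compares the requested bounds directly against the current extent size, and the old chunk-rounded "similar size" path disappears. A simplified, illustrative sketch of the resulting in-place decision order (stand-in names, not jemalloc source):

#include <stddef.h>
#include <stdio.h>

typedef enum { RESIZE_EXPAND, RESIZE_NOOP, RESIZE_SHRINK, RESIZE_MOVE } resize_t;

/* cur is the extent's current size; the enum values are stand-ins. */
static resize_t
huge_resize_decision(size_t cur, size_t usize_min, size_t usize_max)
{
    if (usize_max > cur)
        return (RESIZE_EXPAND);   /* try to grow in place first */
    if (cur >= usize_min && cur <= usize_max)
        return (RESIZE_NOOP);     /* current size already acceptable */
    if (cur > usize_max)
        return (RESIZE_SHRINK);   /* split off and discard the excess pages */
    return (RESIZE_MOVE);         /* otherwise a moving realloc is needed */
}

int
main(void)
{
    /* 6 MiB extent asked to become 5 MiB: shrink in place. */
    printf("%d\n", huge_resize_decision(6 << 20, 5 << 20, 5 << 20));
    return (0);
}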
@@ -224,22 +224,6 @@ TEST_BEGIN(test_chunk)
     do_dalloc = true;
     do_decommit = false;
 
-    /* Test purge for partial-chunk huge allocations. */
-    if (huge0 * 2 > huge2) {
-        /*
-         * There are at least four size classes per doubling, so a
-         * successful xallocx() from size=huge2 to size=huge1 is
-         * guaranteed to leave trailing purgeable memory.
-         */
-        p = mallocx(huge2, flags);
-        assert_ptr_not_null(p, "Unexpected mallocx() error");
-        did_purge = false;
-        assert_zu_eq(xallocx(p, huge1, 0, flags), huge1,
-            "Unexpected xallocx() failure");
-        assert_true(did_purge, "Expected purge");
-        dallocx(p, flags);
-    }
-
     /* Test decommit for large allocations. */
     do_decommit = true;
     p = mallocx(large1, flags);