commit de0305a7f3
parent 1ad060584f

Add/use chunk_split_wrapper().

Remove redundant ptr/oldsize args from huge_*().

Refactor huge/chunk/arena code boundaries.
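Illustrative sketch, not part of the commit: the reason ptr/oldsize can be
dropped is that an extent_t already records an allocation's address and size,
so call sites no longer pass them redundantly. The helper name below is
hypothetical; the accessors are the ones used throughout this diff.

    /* Hedged sketch of a call-site change implied by the new prototypes. */
    static size_t
    example_usable_size(tsdn_t *tsdn, extent_t *extent)
    {

        /* Old interface: huge_salloc(tsdn, extent, ptr). */
        /* New interface: the extent is the single source of truth. */
        return (huge_salloc(tsdn, extent));
    }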
include/jemalloc/internal/arena.h

@@ -176,13 +176,6 @@ typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
 #ifdef JEMALLOC_ARENA_STRUCTS_B
 /* Arena chunk header. */
 struct arena_chunk_s {
-    /*
-     * A pointer to the arena that owns the chunk is stored within the
-     * extent structure.  This field as a whole is used by chunks_rtree to
-     * support both ivsalloc() and core-based debugging.
-     */
-    extent_t        extent;
-
     /*
      * Map of pages within chunk that keeps track of free/large/small.  The
      * first map_bias entries are omitted, since the chunk header does not
@@ -315,7 +308,7 @@ struct arena_s {
      * order to avoid interactions between multiple threads that could make
      * a single spare inadequate.
      */
-    arena_chunk_t       *spare;
+    extent_t            *spare;

     /* Minimum ratio (log base 2) of nactive:ndirty. */
     ssize_t             lg_dirty_mult;
@@ -481,22 +474,27 @@ typedef size_t (run_quantize_t)(size_t);
 extern run_quantize_t *run_quantize_floor;
 extern run_quantize_t *run_quantize_ceil;
 #endif
+extent_t *arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero);
+void arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
 void arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent,
     bool cache);
 void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
     bool cache);
 extent_t *arena_extent_alloc(tsdn_t *tsdn, arena_t *arena);
 void arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool *zero);
+extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
+    size_t usize, size_t alignment, bool *zero);
 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
     size_t usize);
 void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize);
+    extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
-    void *chunk, size_t oldsize, size_t usize);
-bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t usize);
+    extent_t *extent, size_t oldsize);
+void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+    extent_t *extent, size_t oldsize);
 ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
 bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
     ssize_t lg_dirty_mult);
@@ -1193,7 +1191,7 @@ arena_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
             ret = atomic_read_p(&elm->prof_tctx_pun);
         }
     } else
-        ret = huge_prof_tctx_get(tsdn, extent, ptr);
+        ret = huge_prof_tctx_get(tsdn, extent);

     return (ret);
 }
@@ -1230,7 +1228,7 @@ arena_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
             assert(arena_mapbits_large_get(chunk, pageind) == 0);
         }
     } else
-        huge_prof_tctx_set(tsdn, extent, ptr, tctx);
+        huge_prof_tctx_set(tsdn, extent, tctx);
 }

 JEMALLOC_INLINE void
@@ -1258,7 +1256,7 @@ arena_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr,
             atomic_write_p(&elm->prof_tctx_pun,
                 (prof_tctx_t *)(uintptr_t)1U);
         } else
-            huge_prof_tctx_reset(tsdn, extent, ptr);
+            huge_prof_tctx_reset(tsdn, extent);
     }
 }

@@ -1362,7 +1360,7 @@ arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
             ret = index2size(binind);
         }
     } else
-        ret = huge_salloc(tsdn, extent, ptr);
+        ret = huge_salloc(tsdn, extent);

     return (ret);
 }
@@ -1413,7 +1411,7 @@ arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
             }
         }
     } else
-        huge_dalloc(tsdn, extent, ptr);
+        huge_dalloc(tsdn, extent);
 }

 JEMALLOC_ALWAYS_INLINE void
@@ -1470,7 +1468,7 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
             }
         }
     } else
-        huge_dalloc(tsdn, extent, ptr);
+        huge_dalloc(tsdn, extent);
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
include/jemalloc/internal/chunk.h

@@ -55,10 +55,10 @@ chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
 bool chunk_register(tsdn_t *tsdn, const extent_t *extent);
 void chunk_deregister(tsdn_t *tsdn, const extent_t *extent);
 void chunk_reregister(tsdn_t *tsdn, const extent_t *extent);
-void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+extent_t *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
-    bool *zero, bool dalloc_extent);
-void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+    bool *zero);
+extent_t *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
     bool *zero, bool *commit);
 void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
@@ -75,6 +75,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
 bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
     size_t length);
+extent_t *chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t size_b);
 bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
 bool chunk_boot(void);
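Hedged sketch of the assumed chunk_split_wrapper() semantics, inferred from
the call sites later in this commit (chunk_recycle): the argument extent keeps
the size_a head, the returned extent is the size_b tail, and NULL means the
underlying split hook failed. The helper name below is hypothetical.

    static extent_t *
    example_trim_head(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *hooks,
        extent_t *extent, size_t headsize)
    {
        extent_t *tail = chunk_split_wrapper(tsdn, arena, hooks, extent,
            headsize, extent_size_get(extent) - headsize);

        /* On success, *extent now covers only the first headsize bytes. */
        return (tail);
    }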
include/jemalloc/internal/huge.h

@@ -12,22 +12,19 @@
 void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
 void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero);
-bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
-    size_t oldsize, size_t usize_min, size_t usize_max, bool zero);
-void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
-    size_t oldsize, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache);
+bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+    size_t usize_max, bool zero);
+void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr);
-size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent,
-    const void *ptr);
-void huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
-    prof_tctx_t *tctx);
-void huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr);
+void huge_dalloc(tsdn_t *tsdn, extent_t *extent);
+size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent);
+prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
+void huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);

 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/
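A before/after view of one representative call site, sketched from the
prototypes above (error handling elided; illustrative only):

    /* Before: ptr and oldsize travelled alongside the extent. */
    if (huge_ralloc_no_move(tsdn, extent, ptr, oldsize, usize_min,
        usize_max, zero)) { /* ... */ }

    /* After: the extent alone identifies the allocation. */
    if (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max, zero)) {
        /* ... */
    }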
include/jemalloc/internal/private_symbols.txt

@@ -12,6 +12,8 @@ arena_choose
 arena_choose_hard
 arena_choose_impl
 arena_chunk_alloc_huge
+arena_chunk_cache_alloc
+arena_chunk_cache_dalloc
 arena_chunk_cache_maybe_insert
 arena_chunk_cache_maybe_remove
 arena_chunk_dalloc_huge
@@ -184,6 +186,7 @@ chunk_prefork
 chunk_purge_wrapper
 chunk_register
 chunk_reregister
+chunk_split_wrapper
 chunks_rtree
 chunksize
 chunksize_mask
src/arena.c (483 lines changed)
@@ -220,6 +220,55 @@ arena_chunk_dirty_npages(const extent_t *extent)
     return (extent_size_get(extent) >> LG_PAGE);
 }

+static extent_t *
+arena_chunk_cache_alloc_locked(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero)
+{
+
+    malloc_mutex_assert_owner(tsdn, &arena->lock);
+
+    return (chunk_alloc_cache(tsdn, arena, chunk_hooks, new_addr, size,
+        alignment, zero));
+}
+
+extent_t *
+arena_chunk_cache_alloc(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+    bool *zero)
+{
+    extent_t *extent;
+
+    malloc_mutex_lock(tsdn, &arena->lock);
+    extent = arena_chunk_cache_alloc_locked(tsdn, arena, chunk_hooks,
+        new_addr, size, alignment, zero);
+    malloc_mutex_unlock(tsdn, &arena->lock);
+
+    return (extent);
+}
+
+static void
+arena_chunk_cache_dalloc_locked(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed)
+{
+
+    malloc_mutex_assert_owner(tsdn, &arena->lock);
+
+    chunk_dalloc_cache(tsdn, arena, chunk_hooks, chunk, size, committed);
+    arena_maybe_purge(tsdn, arena);
+}
+
+void
+arena_chunk_cache_dalloc(tsdn_t *tsdn, arena_t *arena,
+    chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed)
+{
+
+    malloc_mutex_lock(tsdn, &arena->lock);
+    arena_chunk_cache_dalloc_locked(tsdn, arena, chunk_hooks, chunk, size,
+        committed);
+    malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
 void
 arena_chunk_cache_maybe_insert(arena_t *arena, extent_t *extent, bool cache)
 {
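A note on the pattern above: each cache operation comes in a _locked variant
that asserts ownership of arena->lock and a public variant that acquires it,
so in-arena callers that already hold the lock avoid re-locking while outside
callers get a safe entry point. A minimal, hedged sketch of an outside caller
(the function name is hypothetical; committed=true is an assumption):

    static void
    example_release(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
    {
        chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

        /* Takes and releases arena->lock internally. */
        arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks,
            extent_addr_get(extent), extent_size_get(extent), true);
    }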
@@ -492,112 +541,119 @@ arena_run_split_small(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     return (false);
 }

-static arena_chunk_t *
+static extent_t *
 arena_chunk_init_spare(arena_t *arena)
 {
-    arena_chunk_t *chunk;
+    extent_t *extent;

     assert(arena->spare != NULL);

-    chunk = arena->spare;
+    extent = arena->spare;
     arena->spare = NULL;

-    assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
-    assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
-    assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
-        arena_maxrun);
-    assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
-        arena_maxrun);
-    assert(arena_mapbits_dirty_get(chunk, map_bias) ==
-        arena_mapbits_dirty_get(chunk, chunk_npages-1));
+    assert(arena_mapbits_allocated_get((arena_chunk_t *)
+        extent_addr_get(extent), map_bias) == 0);
+    assert(arena_mapbits_allocated_get((arena_chunk_t *)
+        extent_addr_get(extent), chunk_npages-1) == 0);
+    assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
+        extent_addr_get(extent), map_bias) == arena_maxrun);
+    assert(arena_mapbits_unallocated_size_get((arena_chunk_t *)
+        extent_addr_get(extent), chunk_npages-1) == arena_maxrun);
+    assert(arena_mapbits_dirty_get((arena_chunk_t *)
+        extent_addr_get(extent), map_bias) ==
+        arena_mapbits_dirty_get((arena_chunk_t *)extent_addr_get(extent),
+        chunk_npages-1));

-    return (chunk);
+    return (extent);
 }

-static bool
-arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
-    bool zero)
-{
-
-    /*
-     * The extent notion of "committed" doesn't directly apply to arena
-     * chunks.  Arbitrarily mark them as committed.  The commit state of
-     * runs is tracked individually, and upon chunk deallocation the entire
-     * chunk is in a consistent commit state.
-     */
-    extent_init(&chunk->extent, arena, chunk, chunksize, true, zero, true,
-        true);
-    return (chunk_register(tsdn, &chunk->extent));
-}
-
-static arena_chunk_t *
+static extent_t *
 arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
 {
-    arena_chunk_t *chunk;
+    extent_t *extent;

     malloc_mutex_unlock(tsdn, &arena->lock);

-    chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
-        NULL, chunksize, chunksize, zero, commit);
-    if (chunk != NULL && !*commit) {
+    extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
+        chunksize, zero, commit);
+    if (extent != NULL && !*commit) {
         /* Commit header. */
-        if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, chunk,
-            chunksize, 0, map_bias << LG_PAGE)) {
+        if (chunk_commit_wrapper(tsdn, arena, chunk_hooks,
+            extent_addr_get(extent), extent_size_get(extent), 0,
+            map_bias << LG_PAGE)) {
             chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
-                (void *)chunk, chunksize, *zero, *commit);
-            chunk = NULL;
+                extent_addr_get(extent), extent_size_get(extent),
+                extent_zeroed_get(extent),
+                extent_committed_get(extent));
+            extent = NULL;
         }
     }
-    if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
+    if (extent != NULL) {
+        extent_slab_set(extent, true);
+
+        if (chunk_register(tsdn, extent)) {
             if (!*commit) {
                 /* Undo commit of header. */
                 chunk_decommit_wrapper(tsdn, arena, chunk_hooks,
-                    chunk, chunksize, 0, map_bias << LG_PAGE);
+                    extent_addr_get(extent),
+                    extent_size_get(extent), 0, map_bias <<
+                    LG_PAGE);
+            }
+            chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+                extent_addr_get(extent), extent_size_get(extent),
+                extent_zeroed_get(extent),
+                extent_committed_get(extent));
+            extent = NULL;
         }
-        chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
-            chunksize, *zero, *commit);
-        chunk = NULL;
     }

     malloc_mutex_lock(tsdn, &arena->lock);
-    return (chunk);
+
+    return (extent);
 }

-static arena_chunk_t *
+static extent_t *
 arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
     bool *commit)
 {
-    arena_chunk_t *chunk;
+    extent_t *extent;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

-    chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
-        chunksize, zero, true);
-    if (chunk != NULL) {
-        if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
-            chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
-                chunksize, true);
+    extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
+        chunksize, chunksize, zero);
+    if (extent != NULL) {
+        extent_slab_set(extent, true);
+
+        if (chunk_register(tsdn, extent)) {
+            arena_chunk_cache_dalloc_locked(tsdn, arena,
+                &chunk_hooks, extent_addr_get(extent),
+                extent_size_get(extent), true);
             return (NULL);
         }
         *commit = true;
     }
-    if (chunk == NULL) {
-        chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
+    if (extent == NULL) {
+        extent = arena_chunk_alloc_internal_hard(tsdn, arena,
             &chunk_hooks, zero, commit);
+        if (extent == NULL)
+            return (NULL);
     }
+    assert(extent_slab_get(extent));

-    if (config_stats && chunk != NULL) {
-        arena->stats.mapped += chunksize;
+    if (config_stats) {
+        arena->stats.mapped += extent_size_get(extent);
         arena->stats.metadata_mapped += (map_bias << LG_PAGE);
     }

-    return (chunk);
+    return (extent);
 }

-static arena_chunk_t *
+static extent_t *
 arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
 {
-    arena_chunk_t *chunk;
+    extent_t *extent;
     bool zero, commit;
     size_t flag_unzeroed, flag_decommitted, i;

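An observation on the refactor above, not part of the commit: now that arenas
hand around extent_t pointers, the cast (arena_chunk_t *)extent_addr_get(extent)
recurs at nearly every page-map access. A hypothetical convenience wrapper,
shown only to name the idiom (this commit leaves the casts inline):

    /* Hypothetical helper; not present in this commit. */
    JEMALLOC_INLINE arena_chunk_t *
    extent_to_arena_chunk(const extent_t *extent)
    {

        return ((arena_chunk_t *)extent_addr_get(extent));
    }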
@@ -605,8 +661,8 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)

     zero = false;
     commit = false;
-    chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
-    if (chunk == NULL)
+    extent = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
+    if (extent == NULL)
         return (NULL);

     /*
@@ -616,58 +672,63 @@ arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
      */
     flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
     flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
-    arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
-        flag_unzeroed | flag_decommitted);
+    arena_mapbits_unallocated_set((arena_chunk_t *)extent_addr_get(extent),
+        map_bias, arena_maxrun, flag_unzeroed | flag_decommitted);
     /*
      * There is no need to initialize the internal page map entries unless
      * the chunk is not zeroed.
      */
     if (!zero) {
-        for (i = map_bias+1; i < chunk_npages-1; i++)
-            arena_mapbits_internal_set(chunk, i, flag_unzeroed);
+        for (i = map_bias+1; i < chunk_npages-1; i++) {
+            arena_mapbits_internal_set((arena_chunk_t *)
+                extent_addr_get(extent), i, flag_unzeroed);
+        }
     } else {
         if (config_debug) {
             for (i = map_bias+1; i < chunk_npages-1; i++) {
-                assert(arena_mapbits_unzeroed_get(chunk, i) ==
-                    flag_unzeroed);
+                assert(arena_mapbits_unzeroed_get(
+                    (arena_chunk_t *)extent_addr_get(extent), i)
+                    == flag_unzeroed);
             }
         }
     }
-    arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
-        flag_unzeroed);
+    arena_mapbits_unallocated_set((arena_chunk_t *)extent_addr_get(extent),
+        chunk_npages-1, arena_maxrun, flag_unzeroed);

-    return (chunk);
+    return (extent);
 }

-static arena_chunk_t *
+static extent_t *
 arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
 {
-    arena_chunk_t *chunk;
+    extent_t *extent;

     if (arena->spare != NULL)
-        chunk = arena_chunk_init_spare(arena);
+        extent = arena_chunk_init_spare(arena);
     else {
-        chunk = arena_chunk_init_hard(tsdn, arena);
-        if (chunk == NULL)
+        extent = arena_chunk_init_hard(tsdn, arena);
+        if (extent == NULL)
             return (NULL);
     }

-    ql_elm_new(&chunk->extent, ql_link);
-    ql_tail_insert(&arena->achunks, &chunk->extent, ql_link);
-    arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
+    ql_elm_new(extent, ql_link);
+    ql_tail_insert(&arena->achunks, extent, ql_link);
+    arena_avail_insert(arena, (arena_chunk_t *)extent_addr_get(extent),
+        map_bias, chunk_npages-map_bias);

-    return (chunk);
+    return (extent);
 }

 static void
-arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
 {
     bool committed;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

-    chunk_deregister(tsdn, &chunk->extent);
+    chunk_deregister(tsdn, extent);

-    committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
+    committed = (arena_mapbits_decommitted_get((arena_chunk_t *)
+        extent_addr_get(extent), map_bias) == 0);
     if (!committed) {
         /*
          * Decommit the header.  Mark the chunk as decommitted even if
@@ -675,37 +736,42 @@ arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
          * chunk as committed has a high potential for causing later
          * access of decommitted memory.
          */
-        chunk_decommit_wrapper(tsdn, arena, &chunk_hooks, chunk,
-            chunksize, 0, map_bias << LG_PAGE);
+        chunk_decommit_wrapper(tsdn, arena, &chunk_hooks,
+            extent_addr_get(extent), extent_size_get(extent), 0,
+            map_bias << LG_PAGE);
     }

-    chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
-        committed);
+    arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks,
+        extent_addr_get(extent), extent_size_get(extent), committed);

     if (config_stats) {
-        arena->stats.mapped -= chunksize;
+        arena->stats.mapped -= extent_size_get(extent);
         arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
     }

+    arena_extent_dalloc(tsdn, arena, extent);
 }

 static void
-arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+arena_spare_discard(tsdn_t *tsdn, arena_t *arena, extent_t *spare)
 {

     assert(arena->spare != spare);

-    if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
-        arena_run_dirty_remove(arena, spare, map_bias,
-            chunk_npages-map_bias);
+    if (arena_mapbits_dirty_get((arena_chunk_t *)extent_addr_get(spare),
+        map_bias) != 0) {
+        arena_run_dirty_remove(arena, (arena_chunk_t *)
+            extent_addr_get(spare), map_bias, chunk_npages-map_bias);
     }

     arena_chunk_discard(tsdn, arena, spare);
 }

 static void
-arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
 {
-    arena_chunk_t *spare;
+    arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
+    extent_t *spare;

     assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
     assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -721,9 +787,9 @@ arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
     /* Remove run from runs_avail, so that the arena does not use it. */
     arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

-    ql_remove(&arena->achunks, &chunk->extent, ql_link);
+    ql_remove(&arena->achunks, extent, ql_link);
     spare = arena->spare;
-    arena->spare = chunk;
+    arena->spare = extent;
     if (spare != NULL)
         arena_spare_discard(tsdn, arena, spare);
 }
@@ -778,19 +844,6 @@ arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
     arena->stats.hstats[index].ndalloc--;
 }

-static void
-arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
-{
-    szind_t index = size2index(usize) - nlclasses - NBINS;
-
-    cassert(config_stats);
-
-    arena->stats.ndalloc_huge--;
-    arena->stats.allocated_huge += usize;
-    arena->stats.hstats[index].ndalloc--;
-    arena->stats.hstats[index].curhchunks++;
-}
-
 static void
 arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
 {
@@ -799,15 +852,6 @@ arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
     arena_huge_malloc_stats_update(arena, usize);
 }

-static void
-arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
-    size_t usize)
-{
-
-    arena_huge_dalloc_stats_update_undo(arena, oldsize);
-    arena_huge_malloc_stats_update_undo(arena, usize);
-}
-
 extent_t *
 arena_extent_alloc(tsdn_t *tsdn, arena_t *arena)
 {
@@ -834,17 +878,17 @@ arena_extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
     malloc_mutex_unlock(tsdn, &arena->extent_cache_mtx);
 }

-static void *
+static extent_t *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
     chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
     size_t csize)
 {
-    void *ret;
+    extent_t *extent;
     bool commit = true;

-    ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+    extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
         alignment, zero, &commit);
-    if (ret == NULL) {
+    if (extent == NULL) {
         /* Revert optimistic stats updates. */
         malloc_mutex_lock(tsdn, &arena->lock);
         if (config_stats) {
@@ -855,14 +899,14 @@ arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
         malloc_mutex_unlock(tsdn, &arena->lock);
     }

-    return (ret);
+    return (extent);
 }

-void *
+extent_t *
 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero)
 {
-    void *ret;
+    extent_t *extent;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
     size_t csize = CHUNK_CEILING(usize);

@@ -875,15 +919,15 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     }
     arena_nactive_add(arena, usize >> LG_PAGE);

-    ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
-        alignment, zero, true);
+    extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
+        csize, alignment, zero);
     malloc_mutex_unlock(tsdn, &arena->lock);
-    if (ret == NULL) {
-        ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+    if (extent == NULL) {
+        extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
             usize, alignment, zero, csize);
     }

-    return (ret);
+    return (extent);
 }

 void
@@ -900,14 +944,16 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
     }
     arena_nactive_sub(arena, usize >> LG_PAGE);

-    chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+    arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, chunk, csize,
+        true);
     malloc_mutex_unlock(tsdn, &arena->lock);
 }

 void
-arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize)
+arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    size_t oldsize)
 {
+    size_t usize = extent_size_get(extent);

     assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
     assert(oldsize != usize);
@@ -923,9 +969,10 @@ arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
 }

 void
-arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
-    size_t oldsize, size_t usize)
+arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+    size_t oldsize)
 {
+    size_t usize = extent_size_get(extent);
     size_t udiff = oldsize - usize;
     size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

@@ -936,83 +983,24 @@ arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
         arena->stats.mapped -= cdiff;
     }
     arena_nactive_sub(arena, udiff >> LG_PAGE);
-
-    if (cdiff != 0) {
-        chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-        void *nchunk = (void *)((uintptr_t)chunk +
-            CHUNK_CEILING(usize));
-
-        chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-            true);
-    }
     malloc_mutex_unlock(tsdn, &arena->lock);
 }

-bool
+void
 arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    size_t usize)
+    size_t oldsize)
 {
-    bool err;
-    bool zero = false;
-    chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-    void *nchunk =
-        (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
-    size_t udiff = usize - extent_size_get(extent);
-    size_t cdiff = CHUNK_CEILING(usize) -
-        CHUNK_CEILING(extent_size_get(extent));
-    extent_t *trail;
+    size_t usize = extent_size_get(extent);
+    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+    size_t udiff = usize - oldsize;

     malloc_mutex_lock(tsdn, &arena->lock);
-
-    /* Optimistically update stats. */
     if (config_stats) {
-        arena_huge_ralloc_stats_update(arena, extent_size_get(extent),
-            usize);
+        arena_huge_ralloc_stats_update(arena, oldsize, usize);
         arena->stats.mapped += cdiff;
     }
     arena_nactive_add(arena, udiff >> LG_PAGE);
-
-    err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-        chunksize, &zero, true) == NULL);
     malloc_mutex_unlock(tsdn, &arena->lock);
-
-    if (err) {
-        bool commit = true;
-
-        if (chunk_alloc_wrapper(tsdn, arena, &chunk_hooks, nchunk,
-            cdiff, chunksize, &zero, &commit) == NULL)
-            goto label_revert;
-    }
-
-    trail = arena_extent_alloc(tsdn, arena);
-    if (trail == NULL) {
-        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-            zero, true);
-        goto label_revert;
-    }
-    extent_init(trail, arena, nchunk, cdiff, true, zero, true, false);
-    if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
-        arena_extent_dalloc(tsdn, arena, trail);
-        chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
-            zero, true);
-        goto label_revert;
-    }
-
-    if (usize < extent_size_get(extent))
-        extent_size_set(extent, usize);
-
-    return (false);
-label_revert:
-    /* Revert optimistic stats updates. */
-    malloc_mutex_lock(tsdn, &arena->lock);
-    if (config_stats) {
-        arena_huge_ralloc_stats_update_undo(arena,
-            extent_size_get(extent), usize);
-        arena->stats.mapped -= cdiff;
-    }
-    arena_nactive_sub(arena, udiff >> LG_PAGE);
-    malloc_mutex_unlock(tsdn, &arena->lock);
-    return (true);
 }

 /*
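Worth spelling out about the hunk above: arena_chunk_ralloc_huge_expand() no
longer allocates or merges the trailing chunk itself; it is now a pure
stats/nactive update, and the caller is expected to have already grown the
extent (the commit message points at the huge/chunk/arena boundary refactor,
with the extent manipulation presumably moving into the huge code, which is
not shown in this excerpt). A hedged sketch of the assumed caller-side order;
the function name and trail acquisition are hypothetical:

    static bool
    example_expand(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *hooks,
        extent_t *extent, extent_t *trail, size_t oldsize)
    {

        /* 1) Physically grow the extent by merging in the trail. */
        if (chunk_merge_wrapper(tsdn, arena, hooks, extent, trail))
            return (true);
        /* 2) Report the new size (already in *extent) to the arena. */
        arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldsize);
        return (false);
    }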
@@ -1053,8 +1041,8 @@ arena_run_alloc_large_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
 static arena_run_t *
 arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
 {
-    arena_chunk_t *chunk;
     arena_run_t *run;
+    extent_t *extent;

     assert(size <= arena_maxrun);
     assert(size == PAGE_CEILING(size));
@@ -1067,9 +1055,10 @@ arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
     /*
      * No usable runs.  Create a new chunk from which to allocate the run.
      */
-    chunk = arena_chunk_alloc(tsdn, arena);
-    if (chunk != NULL) {
-        run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+    extent = arena_chunk_alloc(tsdn, arena);
+    if (extent != NULL) {
+        run = &arena_miscelm_get_mutable((arena_chunk_t *)
+            extent_addr_get(extent), map_bias)->run;
         if (arena_run_split_large(tsdn, arena, iealloc(tsdn, run), run,
             size, zero))
             run = NULL;
@@ -1100,8 +1089,8 @@ arena_run_alloc_small_helper(tsdn_t *tsdn, arena_t *arena, size_t size,
 static arena_run_t *
 arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
 {
-    arena_chunk_t *chunk;
     arena_run_t *run;
+    extent_t *extent;

     assert(size <= arena_maxrun);
     assert(size == PAGE_CEILING(size));
@@ -1115,9 +1104,10 @@ arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
     /*
      * No usable runs.  Create a new chunk from which to allocate the run.
      */
-    chunk = arena_chunk_alloc(tsdn, arena);
-    if (chunk != NULL) {
-        run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+    extent = arena_chunk_alloc(tsdn, arena);
+    if (extent != NULL) {
+        run = &arena_miscelm_get_mutable(
+            (arena_chunk_t *)extent_addr_get(extent), map_bias)->run;
         if (arena_run_split_small(tsdn, arena, iealloc(tsdn, run), run,
             size, binind))
             run = NULL;
@@ -1420,6 +1410,8 @@ void
 arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
 {

+    malloc_mutex_assert_owner(tsdn, &arena->lock);
+
     /* Don't recursively purge. */
     if (arena->purging)
         return;
@@ -1484,7 +1476,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
         if (rdelm == &chunkselm->rd) {
             extent_t *chunkselm_next;
             bool zero;
-            UNUSED void *chunk;
+            UNUSED extent_t *extent;

             npages = extent_size_get(chunkselm) >> LG_PAGE;
             if (opt_purge == purge_mode_decay && arena->ndirty -
@@ -1492,16 +1484,12 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
                 break;

             chunkselm_next = qr_next(chunkselm, cc_link);
-            /*
-             * Allocate.  chunkselm remains valid due to the
-             * dalloc_extent=false argument to chunk_alloc_cache().
-             */
+            /* Allocate. */
             zero = false;
-            chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
-                extent_addr_get(chunkselm),
-                extent_size_get(chunkselm), chunksize, &zero,
-                false);
-            assert(chunk == extent_addr_get(chunkselm));
+            extent = arena_chunk_cache_alloc_locked(tsdn, arena,
+                chunk_hooks, extent_addr_get(chunkselm),
+                extent_size_get(chunkselm), chunksize, &zero);
+            assert(extent == chunkselm);
             assert(zero == extent_zeroed_get(chunkselm));
             extent_dirty_insert(chunkselm, purge_runs_sentinel,
                 purge_chunks_sentinel);
@@ -1510,14 +1498,13 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             chunkselm = chunkselm_next;
         } else {
             extent_t *extent = iealloc(tsdn, rdelm);
-            arena_chunk_t *chunk =
-                (arena_chunk_t *)extent_addr_get(extent);
             arena_chunk_map_misc_t *miscelm =
                 arena_rd_to_miscelm(rdelm);
             size_t pageind = arena_miscelm_to_pageind(miscelm);
             arena_run_t *run = &miscelm->run;
             size_t run_size =
-                arena_mapbits_unallocated_size_get(chunk, pageind);
+                arena_mapbits_unallocated_size_get((arena_chunk_t *)
+                extent_addr_get(extent), pageind);

             npages = run_size >> LG_PAGE;
             if (opt_purge == purge_mode_decay && arena->ndirty -
@@ -1525,14 +1512,16 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
                 break;

             assert(pageind + npages <= chunk_npages);
-            assert(arena_mapbits_dirty_get(chunk, pageind) ==
-                arena_mapbits_dirty_get(chunk, pageind+npages-1));
+            assert(arena_mapbits_dirty_get((arena_chunk_t *)
+                extent_addr_get(extent), pageind) ==
+                arena_mapbits_dirty_get((arena_chunk_t *)
+                extent_addr_get(extent), pageind+npages-1));

             /*
              * If purging the spare chunk's run, make it available
              * prior to allocation.
              */
-            if (chunk == arena->spare)
+            if (extent == arena->spare)
                 arena_chunk_alloc(tsdn, arena);

             /* Temporarily allocate the free dirty run. */
@@ -1757,8 +1746,9 @@ arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
 }

 static void
-arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
+arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, extent_t *extent)
 {
+    arena_chunk_t *chunk = (arena_chunk_t *)extent_addr_get(extent);
     size_t pageind, npages;

     cassert(config_prof);
@@ -1773,10 +1763,10 @@ arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, extent_t *extent)
         if (arena_mapbits_large_get(chunk, pageind) != 0) {
             void *ptr = (void *)((uintptr_t)chunk + (pageind
                 << LG_PAGE));
-            size_t usize = isalloc(tsd_tsdn(tsd),
-                &chunk->extent, ptr, config_prof);
+            size_t usize = isalloc(tsd_tsdn(tsd), extent,
+                ptr, config_prof);

-            prof_free(tsd, &chunk->extent, ptr, usize);
+            prof_free(tsd, extent, ptr, usize);
             npages = arena_mapbits_large_size_get(chunk,
                 pageind) >> LG_PAGE;
         } else {
@@ -1819,8 +1809,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
     /* Remove large allocations from prof sample set. */
     if (config_prof && opt_prof) {
         ql_foreach(extent, &arena->achunks, ql_link) {
-            arena_achunk_prof_reset(tsd, arena,
-                extent_addr_get(extent));
+            arena_achunk_prof_reset(tsd, arena, extent);
         }
     }

@@ -1845,7 +1834,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
         /* Remove huge allocation from prof sample set. */
         if (config_prof && opt_prof)
             prof_free(tsd, extent, ptr, usize);
-        huge_dalloc(tsd_tsdn(tsd), extent, ptr);
+        huge_dalloc(tsd_tsdn(tsd), extent);
         malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
         /* Cancel out unwanted effects on stats. */
         if (config_stats)
@@ -1883,8 +1872,7 @@ arena_reset(tsd_t *tsd, arena_t *arena)
     for (extent = ql_last(&arena->achunks, ql_link); extent != NULL; extent
         = ql_last(&arena->achunks, ql_link)) {
         ql_remove(&arena->achunks, extent, ql_link);
-        arena_chunk_discard(tsd_tsdn(tsd), arena,
-            extent_addr_get(extent));
+        arena_chunk_discard(tsd_tsdn(tsd), arena, extent);
     }

     /* Spare. */
@@ -2078,7 +2066,7 @@ arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     if (size == arena_maxrun) {
         assert(run_ind == map_bias);
         assert(run_pages == (arena_maxrun >> LG_PAGE));
-        arena_chunk_dalloc(tsdn, arena, chunk);
+        arena_chunk_dalloc(tsdn, arena, extent);
     }

     /*
@@ -3113,10 +3101,12 @@ arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,

         arena_decay_tick(tsdn, extent_arena_get(extent));
         return (false);
-    } else {
-        return (huge_ralloc_no_move(tsdn, extent, ptr, oldsize,
-            usize_min, usize_max, zero));
+    } else if (oldsize >= chunksize && usize_max >= chunksize) {
+        return (huge_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+            zero));
     }
+
+    return (true);
 }

 static void *
@@ -3138,27 +3128,30 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
     size_t oldsize, size_t size, size_t alignment, bool zero, tcache_t *tcache)
 {
     void *ret;
-    size_t usize;
+    size_t usize, copysize;

     usize = s2u(size);
     if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
         return (NULL);

     if (likely(usize <= large_maxclass)) {
-        size_t copysize;
-
         /* Try to avoid moving the allocation. */
         if (!arena_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, 0,
             zero))
             return (ptr);
+    }
+
+    if (oldsize >= chunksize && usize >= chunksize) {
+        return (huge_ralloc(tsdn, arena, extent, usize, alignment, zero,
+            tcache));
+    }

     /*
-     * size and oldsize are different enough that we need to move
-     * the object.  In that case, fall back to allocating new space
-     * and copying.
+     * size and oldsize are different enough that we need to move the
+     * object.  In that case, fall back to allocating new space and copying.
      */
-    ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
-        zero, tcache);
+    ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, zero,
+        tcache);
     if (ret == NULL)
         return (NULL);

@@ -3170,10 +3163,6 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
     copysize = (usize < oldsize) ? usize : oldsize;
     memcpy(ret, ptr, copysize);
     isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
-    } else {
-        ret = huge_ralloc(tsdn, arena, extent, ptr, oldsize, usize,
-            alignment, zero, tcache);
-    }
     return (ret);
 }

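The reshuffled arena_ralloc() above now routes purely by size class: try an
in-place resize for small/large sizes, delegate huge-to-huge moves to
huge_ralloc(), and otherwise fall through to allocate-copy-free. A condensed,
hedged sketch of that control flow (illustrative, not the verbatim function;
the name is hypothetical):

    void *
    example_ralloc_flow(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
        void *ptr, size_t oldsize, size_t usize, size_t alignment, bool zero,
        tcache_t *tcache)
    {

        if (usize <= large_maxclass && !arena_ralloc_no_move(tsdn, extent,
            ptr, oldsize, usize, 0, zero))
            return (ptr);       /* Resized in place. */
        if (oldsize >= chunksize && usize >= chunksize) {
            /* Huge-to-huge: delegate. */
            return (huge_ralloc(tsdn, arena, extent, usize, alignment,
                zero, tcache));
        }
        /* Else: fall through to allocate-copy-free (omitted here). */
        return (NULL);
    }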
|
245
src/chunk.c
245
src/chunk.c
@ -58,7 +58,8 @@ static void chunk_record(tsdn_t *tsdn, arena_t *arena,
|
|||||||
static void
|
static void
|
||||||
extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
|
extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
|
||||||
{
|
{
|
||||||
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
|
size_t psz =
|
||||||
|
extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
|
||||||
pszind_t pind = psz2ind(psz);
|
pszind_t pind = psz2ind(psz);
|
||||||
extent_heap_insert(&extent_heaps[pind], extent);
|
extent_heap_insert(&extent_heaps[pind], extent);
|
||||||
}
|
}
|
||||||
@ -66,7 +67,8 @@ extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
|
|||||||
static void
|
static void
|
||||||
extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
|
extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
|
||||||
{
|
{
|
||||||
size_t psz = extent_size_quantize_floor(extent_size_get(extent));
|
size_t psz =
|
||||||
|
extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
|
||||||
pszind_t pind = psz2ind(psz);
|
pszind_t pind = psz2ind(psz);
|
||||||
extent_heap_remove(&extent_heaps[pind], extent);
|
extent_heap_remove(&extent_heaps[pind], extent);
|
||||||
}
|
}
|
||||||
@ -211,7 +213,7 @@ chunk_register(tsdn_t *tsdn, const extent_t *extent)
|
|||||||
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
|
extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent);
|
||||||
extent_rtree_release(tsdn, elm_a, elm_b);
|
extent_rtree_release(tsdn, elm_a, elm_b);
|
||||||
|
|
||||||
if (config_prof && opt_prof) {
|
if (config_prof && opt_prof && extent_active_get(extent)) {
|
||||||
size_t nadd = (extent_size_get(extent) == 0) ? 1 :
|
size_t nadd = (extent_size_get(extent) == 0) ? 1 :
|
||||||
extent_size_get(extent) / chunksize;
|
extent_size_get(extent) / chunksize;
|
||||||
size_t cur = atomic_add_z(&curchunks, nadd);
|
size_t cur = atomic_add_z(&curchunks, nadd);
|
||||||
@ -239,7 +241,7 @@ chunk_deregister(tsdn_t *tsdn, const extent_t *extent)
|
|||||||
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
|
extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL);
|
||||||
extent_rtree_release(tsdn, elm_a, elm_b);
|
extent_rtree_release(tsdn, elm_a, elm_b);
|
||||||
|
|
||||||
if (config_prof && opt_prof) {
|
if (config_prof && opt_prof && extent_active_get(extent)) {
|
||||||
size_t nsub = (extent_size_get(extent) == 0) ? 1 :
|
size_t nsub = (extent_size_get(extent) == 0) ? 1 :
|
||||||
extent_size_get(extent) / chunksize;
|
extent_size_get(extent) / chunksize;
|
||||||
assert(atomic_read_z(&curchunks) >= nsub);
|
assert(atomic_read_z(&curchunks) >= nsub);
|
||||||
@ -293,23 +295,15 @@ chunk_leak(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, bool cache,
	}
}

static void *
static extent_t *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_heap_t extent_heaps[NPSIZES], bool cache, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit, bool dalloc_extent)
    size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	extent_t *extent;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed, committed;

	assert(new_addr == NULL || alignment == chunksize);
	/*
	 * Cached chunks use the extent linkage embedded in their headers, in
	 * which case dalloc_extent is true, and new_addr is non-NULL because
	 * we're operating on a specific chunk.
	 */
	assert(dalloc_extent || new_addr != NULL);

	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
	/* Beware size_t wrap-around. */
@ -338,99 +332,79 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		return (NULL);
	}
	extent_heaps_remove(extent_heaps, extent);
	arena_chunk_cache_maybe_remove(arena, extent, cache);

	leadsize = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
	    alignment) - (uintptr_t)extent_addr_get(extent);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_size_get(extent) >= leadsize + size);
	trailsize = extent_size_get(extent) - leadsize - size;
	ret = (void *)((uintptr_t)extent_addr_get(extent) + leadsize);
	if (extent_zeroed_get(extent))
	zeroed = extent_zeroed_get(extent);
	if (zeroed)
		*zero = true;
	committed = extent_committed_get(extent);
	if (extent_committed_get(extent))
	if (committed)
		*commit = true;

	/* Split the lead. */
	if (leadsize != 0 &&
	    chunk_hooks->split(extent_addr_get(extent),
	    extent_size_get(extent), leadsize, size, false, arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		return (NULL);
	}
	/* Remove extent from the heap. */
	chunk_deregister(tsdn, extent);
	extent_heaps_remove(extent_heaps, extent);
	arena_chunk_cache_maybe_remove(arena, extent, cache);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		extent_t *lead = extent;
		extent_size_set(extent, leadsize);
		extent = chunk_split_wrapper(tsdn, arena, chunk_hooks, lead,
		if (chunk_register(tsdn, extent)) {
		    leadsize, size + trailsize);
		if (extent == NULL) {
			chunk_leak(tsdn, arena, chunk_hooks, cache,
			    extent_addr_get(extent), extent_size_get(extent));
			    extent_addr_get(lead), extent_size_get(lead));
			arena_extent_dalloc(tsdn, arena, extent);
			arena_extent_dalloc(tsdn, arena, lead);
		} else {
			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
			extent_heaps_insert(extent_heaps, extent);
			return (NULL);
			arena_chunk_cache_maybe_insert(arena, extent, cache);
		}
		extent = NULL;
		extent_heaps_insert(extent_heaps, lead);
		arena_chunk_cache_maybe_insert(arena, lead, cache);
	}
	if (trailsize != 0) {
		/* Split the trail. */
		if (chunk_hooks->split(ret, size + trailsize, size,
	if (trailsize != 0) {
		    trailsize, false, arena->ind)) {
		extent_t *trail = chunk_split_wrapper(tsdn, arena, chunk_hooks,
			if (dalloc_extent && extent != NULL)
		    extent, size, trailsize);
				arena_extent_dalloc(tsdn, arena, extent);
		if (trail == NULL) {
			malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
			chunk_record(tsdn, arena, chunk_hooks, extent_heaps,
			    cache, ret, size + trailsize, zeroed, committed);
			return (NULL);
		}
		/* Insert the trailing space as a smaller chunk. */
		if (extent == NULL) {
			extent = arena_extent_alloc(tsdn, arena);
			if (extent == NULL) {
				malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
				chunk_record(tsdn, arena, chunk_hooks,
				    extent_heaps, cache, ret, size + trailsize,
				    zeroed, committed);
				return (NULL);
			}
		}
		extent_init(extent, arena, (void *)((uintptr_t)(ret) + size),
		    trailsize, false, zeroed, committed, false);
		if (chunk_register(tsdn, extent)) {
			chunk_leak(tsdn, arena, chunk_hooks, cache,
			    extent_addr_get(extent), extent_size_get(extent));
			arena_extent_dalloc(tsdn, arena, extent);
		} else {
			extent_heaps_insert(extent_heaps, extent);
			arena_chunk_cache_maybe_insert(arena, extent, cache);
		}
		extent = NULL;
	}
	if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache, ret,
		    size, zeroed, committed);
		return (NULL);
	}
		extent_heaps_insert(extent_heaps, trail);
		arena_chunk_cache_maybe_insert(arena, trail, cache);
	}

	if (!extent_committed_get(extent) &&
	    chunk_hooks->commit(extent_addr_get(extent),
	    extent_size_get(extent), 0, extent_size_get(extent), arena->ind)) {
		malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
		chunk_record(tsdn, arena, chunk_hooks, extent_heaps, cache,
		    extent_addr_get(extent), extent_size_get(extent),
		    extent_zeroed_get(extent), extent_committed_get(extent));
		arena_extent_dalloc(tsdn, arena, extent);
		return (NULL);
	}

	extent_active_set(extent, true);

	malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

	assert(dalloc_extent || extent != NULL);
	if (dalloc_extent && extent != NULL)
		arena_extent_dalloc(tsdn, arena, extent);
	if (*zero) {
		if (!zeroed)
		if (!extent_zeroed_get(extent)) {
			memset(ret, 0, size);
			memset(extent_addr_get(extent), 0,
		else if (config_debug) {
			    extent_size_get(extent));
		} else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;
			size_t *p = (size_t *)(uintptr_t)
			    extent_addr_get(extent);

			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
	return (extent);
}

/*
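Everything chunk_recycle() does with leadsize and trailsize above is alignment arithmetic on the recycled extent's base address. Below is a minimal standalone sketch of that arithmetic with toy addresses and sizes; ALIGNMENT_CEILING mirrors jemalloc's macro of the same name, and none of the values are taken from real jemalloc state.

/*
 * Standalone sketch of chunk_recycle()'s lead/trail arithmetic: round the
 * extent's base up to the requested alignment, take `size` bytes, and
 * whatever remains on either side becomes the lead and trail splits.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, alignment)					\
	(((s) + ((alignment) - 1)) & ~((uintptr_t)(alignment) - 1))

int
main(void)
{
	uintptr_t addr = 0x200000;		/* extent base (toy value) */
	size_t extent_size = 8 * 1024 * 1024;	/* 8 MiB cached extent */
	size_t size = 2 * 1024 * 1024;		/* requested 2 MiB */
	size_t alignment = 4 * 1024 * 1024;	/* requested 4 MiB alignment */

	size_t leadsize = ALIGNMENT_CEILING(addr, alignment) - addr;
	assert(extent_size >= leadsize + size);
	size_t trailsize = extent_size - leadsize - size;

	printf("lead %zu bytes, alloc %zu bytes at %#lx, trail %zu bytes\n",
	    leadsize, size, (unsigned long)(addr + leadsize), trailsize);
	return (0);
}
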
@ -469,12 +443,11 @@ chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
	return (NULL);
}

void *
extent_t *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero,
    void *new_addr, size_t size, size_t alignment, bool *zero)
    bool dalloc_extent)
{
	void *ret;
	extent_t *extent;
	bool commit;

	assert(size != 0);
@ -483,12 +456,12 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
	assert((alignment & chunksize_mask) == 0);

	commit = true;
	ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
	extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
	    true, new_addr, size, alignment, zero, &commit, dalloc_extent);
	    true, new_addr, size, alignment, zero, &commit);
	if (ret == NULL)
	if (extent == NULL)
		return (NULL);
	assert(commit);
	return (ret);
	return (extent);
}

static arena_t *
@ -523,44 +496,51 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
	return (ret);
}

static void *
static extent_t *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	extent_t *extent;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
	extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
	    false, new_addr, size, alignment, zero, commit, true);
	    false, new_addr, size, alignment, zero, commit);

	if (config_stats && ret != NULL)
	if (config_stats && extent != NULL)
		arena->stats.retained -= size;

	return (ret);
	return (extent);
}

void *
extent_t *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
	void *ret;
	extent_t *extent;

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

	ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
	extent = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
	    alignment, zero, commit);
	if (ret == NULL) {
	if (extent == NULL) {
		ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
		void *chunk;
		    commit, arena->ind);
		if (ret == NULL)
		extent = arena_extent_alloc(tsdn, arena);
		if (extent == NULL)
			return (NULL);
		chunk = chunk_hooks->alloc(new_addr, size, alignment,
		    zero, commit, arena->ind);
		if (chunk == NULL)
			return (NULL);
		extent_init(extent, arena, chunk, size, true, zero, commit,
		    false);
	}

	return (ret);
	return (extent);
}

static bool
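The wrapper now returns a descriptor rather than a raw address: when recycling fails, it allocates an extent first, then the chunk, and initializes the former to describe the latter. Below is a toy model of that convention; the struct and helpers are simplified stand-ins for jemalloc's extent API, not the real types.

/*
 * Toy model of the wrapper's new return convention: callers receive an
 * extent_t-style descriptor rather than a raw pointer.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	void	*addr;		/* base of the underlying chunk */
	size_t	size;		/* usable size in bytes */
	bool	active;		/* currently allocated to a user */
	bool	zeroed;		/* contents known to be zero */
	bool	committed;	/* backed by physical pages */
} extent_t;

/* Wrap a freshly mapped chunk in a descriptor, mirroring extent_init(). */
static extent_t *
extent_wrap(void *chunk, size_t size, bool zeroed, bool committed)
{
	extent_t *extent = malloc(sizeof(extent_t));

	if (extent == NULL)
		return (NULL);
	extent->addr = chunk;
	extent->size = size;
	extent->active = true;
	extent->zeroed = zeroed;
	extent->committed = committed;
	return (extent);
}

int
main(void)
{
	void *chunk = malloc(4096);	/* stand-in for chunk_hooks->alloc() */
	extent_t *extent = extent_wrap(chunk, 4096, false, true);

	if (extent != NULL) {
		printf("extent: addr=%p size=%zu\n", extent->addr,
		    extent->size);
		free(extent);
	}
	free(chunk);
	return (0);
}
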
@ -668,7 +648,6 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,

	chunk_record(tsdn, arena, chunk_hooks, arena->chunks_cached, true,
	    chunk, size, false, committed);
	arena_maybe_purge(tsdn, arena);
}

static bool
@ -779,6 +758,67 @@ chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
	return (false);
}

extent_t *
chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_t *extent, size_t size_a, size_t size_b)
{
	extent_t *trail;
	rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;

	assert(CHUNK_CEILING(size_a) == size_a);
	assert(CHUNK_CEILING(extent_size_get(extent)) == size_a +
	    CHUNK_CEILING(size_b));

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

	trail = arena_extent_alloc(tsdn, arena);
	if (trail == NULL)
		goto label_error_a;

	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    extent_active_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_slab_get(extent));

		if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
		    &lead_elm_b))
			goto label_error_b;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_addr_get(extent) +
	    size_a), CHUNK_CEILING(size_b), extent_active_get(extent),
	    extent_zeroed_get(extent), extent_committed_get(extent),
	    extent_slab_get(extent));
	if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
	    &trail_elm_b))
		goto label_error_c;

	if (chunk_hooks->split(extent_addr_get(extent), size_a +
	    CHUNK_CEILING(size_b), size_a, CHUNK_CEILING(size_b),
	    extent_committed_get(extent), arena->ind))
		goto label_error_d;

	extent_size_set(extent, size_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail);

	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);

	return (trail);
label_error_d:
	extent_rtree_release(tsdn, trail_elm_a, trail_elm_b);
label_error_c:
	extent_rtree_release(tsdn, lead_elm_a, lead_elm_b);
label_error_b:
	arena_extent_dalloc(tsdn, arena, trail);
label_error_a:
	return (NULL);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
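chunk_split_wrapper() shrinks the passed-in extent to size_a in place and returns a freshly tracked trail extent for the remainder. Below is a standalone sketch of just the address/size bookkeeping, with a simplified extent struct and an assumed 2 MiB chunk size; it is an illustration of the invariant, not the real implementation.

/*
 * Toy model of chunk_split_wrapper()'s address arithmetic: splitting
 * [addr, addr+size_a+size_b) into a lead of size_a and a trail of size_b.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK			((uintptr_t)(2 * 1024 * 1024))	/* assumed */
#define CHUNK_CEILING(s)	(((s) + CHUNK - 1) & ~(CHUNK - 1))

typedef struct {
	uintptr_t	addr;
	size_t		size;
} extent_t;

/* Shrink *extent to size_a in place; return the trailing remainder. */
static extent_t
extent_split(extent_t *extent, size_t size_a, size_t size_b)
{
	extent_t trail;

	/* Mirrors the wrapper's preconditions. */
	assert(CHUNK_CEILING(size_a) == size_a);
	assert(CHUNK_CEILING(extent->size) == size_a + CHUNK_CEILING(size_b));

	trail.addr = extent->addr + size_a;
	trail.size = CHUNK_CEILING(size_b);
	extent->size = size_a;
	return (trail);
}

int
main(void)
{
	extent_t extent = {0x40000000, 4 * CHUNK};
	extent_t trail = extent_split(&extent, 1 * CHUNK, 3 * CHUNK);

	printf("lead: %#lx + %zu; trail: %#lx + %zu\n",
	    (unsigned long)extent.addr, extent.size,
	    (unsigned long)trail.addr, trail.size);
	return (0);
}
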
@ -801,6 +841,7 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
{
	rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;

	chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
	if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
	    extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
	    arena->ind))
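chunk_merge_wrapper() is the inverse operation: it can only coalesce two extents whose address ranges abut. Below is a standalone sketch of that adjacency precondition with a simplified extent struct; the values are toys, not jemalloc state.

/*
 * Standalone sketch of the merge precondition: two extents can only be
 * coalesced when the first ends exactly where the second begins.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	uintptr_t	addr;
	size_t		size;
} extent_t;

static bool
extents_adjacent(const extent_t *a, const extent_t *b)
{
	return (a->addr + a->size == b->addr);
}

int
main(void)
{
	extent_t a = {0x40000000, 2 * 1024 * 1024};
	extent_t b = {0x40200000, 2 * 1024 * 1024};

	if (extents_adjacent(&a, &b)) {
		a.size += b.size;	/* mirrors the post-merge size */
		printf("merged: %#lx + %zu\n", (unsigned long)a.addr, a.size);
	}
	return (0);
}
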
254 src/huge.c
@ -16,7 +16,6 @@ void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
	void *ret;
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
@ -30,12 +29,6 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
		return (NULL);
	assert(ausize >= chunksize);

	/* Allocate an extent with which to track the chunk. */
	extent = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_t)),
	    CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
	if (extent == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
	 * it is possible to make correct junk/zero fill decisions below.
@ -43,19 +36,17 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn)))
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
	if (unlikely(arena == NULL) || (extent = arena_chunk_alloc_huge(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
	    arena, usize, alignment, &is_zeroed)) == NULL)
		idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
		    true);
		return (NULL);
	}

	extent_init(extent, arena, ret, usize, true, is_zeroed, true, false);
	if (usize < extent_size_get(extent))
		extent_size_set(extent, usize);

	if (chunk_register(tsdn, extent)) {
		arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
		arena_chunk_dalloc_huge(tsdn, arena, extent_addr_get(extent),
		idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true,
		    usize);
		    true);
		arena_extent_dalloc(tsdn, arena, extent);
		return (NULL);
	}

@ -67,12 +58,12 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,

	if (zero || (config_fill && unlikely(opt_zero))) {
		if (!is_zeroed)
			memset(ret, 0, usize);
			memset(extent_addr_get(extent), 0, usize);
	} else if (config_fill && unlikely(opt_junk_alloc))
		memset(ret, JEMALLOC_ALLOC_JUNK, usize);
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, usize);

	arena_decay_tick(tsdn, arena);
	return (ret);
	return (extent_addr_get(extent));
}

#ifdef JEMALLOC_JET
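huge_palloc() now receives a whole-chunk-sized extent from arena_chunk_alloc_huge() and trims the recorded size down to usize. Below is a standalone sketch of that bookkeeping, assuming 2 MiB chunks for illustration.

/*
 * Standalone sketch of huge_palloc()'s size bookkeeping: the backing
 * allocation is always a whole number of chunks, while the extent records
 * the (possibly smaller) usable size.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNK			((size_t)(2 * 1024 * 1024))	/* assumed */
#define CHUNK_CEILING(s)	(((s) + CHUNK - 1) & ~(CHUNK - 1))

int
main(void)
{
	size_t usize = 5 * 1024 * 1024;			/* 5 MiB request */
	size_t backing = CHUNK_CEILING(usize);		/* 6 MiB of chunks */
	size_t extent_size = backing;

	/* Mirrors: if (usize < extent_size_get(extent)) extent_size_set() */
	if (usize < extent_size)
		extent_size = usize;

	assert(extent_size == usize);
	printf("backing %zu bytes, recorded size %zu bytes\n", backing,
	    extent_size);
	return (0);
}
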
@ -99,11 +90,12 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
    size_t usize_max, bool zero)
{
	size_t usize, usize_next;
	arena_t *arena;
	arena_t *arena = extent_arena_get(extent);
	size_t oldsize = extent_size_get(extent);
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	bool pre_zeroed, post_zeroed;

@ -115,20 +107,19 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
	if (oldsize == usize)
		return;

	arena = extent_arena_get(extent);
	pre_zeroed = extent_zeroed_get(extent);

	/* Fill if necessary (shrinking). */
	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			memset((void *)((uintptr_t)ptr + usize),
			memset((void *)((uintptr_t)extent_addr_get(extent) +
			    JEMALLOC_FREE_JUNK, sdiff);
			    usize), JEMALLOC_FREE_JUNK, sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
			    &chunk_hooks, extent_addr_get(extent),
			    sdiff);
			    CHUNK_CEILING(oldsize), usize, sdiff);
		}
	} else
		post_zeroed = pre_zeroed;
@ -143,132 +134,157 @@ huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, void *ptr,
	/* Update zeroed. */
	extent_zeroed_set(extent, post_zeroed);

	arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
	arena_chunk_ralloc_huge_similar(tsdn, arena, extent, oldsize);

	/* Fill if necessary (growing). */
	if (oldsize < usize) {
		if (zero || (config_fill && unlikely(opt_zero))) {
			if (!pre_zeroed) {
				memset((void *)((uintptr_t)ptr + oldsize), 0,
				memset((void *)
				    usize - oldsize);
				    ((uintptr_t)extent_addr_get(extent) +
				    oldsize), 0, usize - oldsize);
			}
		} else if (config_fill && unlikely(opt_junk_alloc)) {
			memset((void *)((uintptr_t)ptr + oldsize),
			memset((void *)((uintptr_t)extent_addr_get(extent) +
			    JEMALLOC_ALLOC_JUNK, usize - oldsize);
			    oldsize), JEMALLOC_ALLOC_JUNK, usize - oldsize);
		}
	}
}

static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, void *ptr,
huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
    size_t oldsize, size_t usize)
{
	arena_t *arena;
	arena_t *arena = extent_arena_get(extent);
	chunk_hooks_t chunk_hooks;
	size_t oldsize = extent_size_get(extent);
	size_t cdiff;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	bool pre_zeroed, post_zeroed;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	size_t sdiff = CHUNK_CEILING(usize) - usize;
	arena = extent_arena_get(extent);
	pre_zeroed = extent_zeroed_get(extent);
	chunk_hooks = chunk_hooks_get(tsdn, arena);

	assert(oldsize > usize);

	/* Split excess chunks. */
	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	if (cdiff != 0) {
	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
		extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
	    CHUNK_CEILING(usize), cdiff, true, arena->ind))
		    extent, CHUNK_CEILING(usize), cdiff);
		if (trail == NULL)
			return (true);

	if (oldsize > usize) {
		size_t sdiff = oldsize - usize;
		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize),
			huge_dalloc_junk(tsdn, extent_addr_get(trail),
			    sdiff);
			    extent_size_get(trail));
		}

		arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks,
		    extent_addr_get(trail), extent_size_get(trail),
		    extent_committed_get(trail));

		arena_extent_dalloc(tsdn, arena, trail);
	}

	/* Optionally fill trailing subchunk. */
	if (sdiff != 0) {
		bool post_zeroed;

		if (config_fill && unlikely(opt_junk_free)) {
			huge_dalloc_junk(tsdn,
			    (void *)((uintptr_t)extent_addr_get(extent) +
			    usize), sdiff);
			post_zeroed = false;
		} else {
			post_zeroed = !chunk_purge_wrapper(tsdn, arena,
			    &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
			    &chunk_hooks, extent_addr_get(extent),
			    usize), CHUNK_CEILING(oldsize),
			    CHUNK_CEILING(usize), usize, sdiff);
			    CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
			if (config_fill && unlikely(opt_zero) && !post_zeroed) {
				memset((void *)
				    ((uintptr_t)extent_addr_get(extent) +
				    usize), 0, sdiff);
			}
		}
	} else
		post_zeroed = pre_zeroed;

	/* Update the size of the huge allocation. */
	chunk_deregister(tsdn, extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_size_set(extent, usize);
	/* Update zeroed. */
		extent_zeroed_set(extent, post_zeroed);
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
	}
	chunk_reregister(tsdn, extent);

	/* Zap the excess chunks. */
	arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldsize);
	arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);

	return (false);
}
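The shrink path above splits off cdiff bytes of whole trailing chunks via chunk_split_wrapper() and only purges the sdiff tail of the last retained chunk. Below is a standalone sketch of the two quantities, assuming 2 MiB chunks for illustration.

/*
 * Standalone sketch of the shrink-path arithmetic: cdiff is the number of
 * whole trailing chunk bytes to split off and return to the cache, sdiff
 * is the partial tail of the last retained chunk that only needs purging.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CHUNK			((size_t)(2 * 1024 * 1024))	/* assumed */
#define CHUNK_CEILING(s)	(((s) + CHUNK - 1) & ~(CHUNK - 1))

int
main(void)
{
	size_t oldsize = 7 * 1024 * 1024;	/* 7 MiB -> 8 MiB of chunks */
	size_t usize = 3 * 1024 * 1024;		/* 3 MiB -> 4 MiB of chunks */

	assert(oldsize > usize);
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
	size_t sdiff = CHUNK_CEILING(usize) - usize;

	printf("split off %zu bytes of whole chunks, purge %zu tail bytes\n",
	    cdiff, sdiff);	/* 4 MiB split, 1 MiB purged */
	return (0);
}
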

static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, void *ptr,
huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    size_t oldsize, size_t usize, bool zero)
    bool zero)
{
	arena_t *arena;
	arena_t *arena = extent_arena_get(extent);
	bool is_zeroed_subchunk;
	size_t oldsize = extent_size_get(extent);
	bool is_zeroed_subchunk = extent_zeroed_get(extent);
	bool is_zeroed_chunk = false;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
	void *nchunk =
	    (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
	extent_t *trail;

	arena = extent_arena_get(extent);
	if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks, nchunk,
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	    cdiff, chunksize, &is_zeroed_chunk)) == NULL) {
	is_zeroed_subchunk = extent_zeroed_get(extent);
		bool commit = true;
	malloc_mutex_unlock(tsdn, &arena->huge_mtx);
		if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
		    nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit)) ==
	if (arena_chunk_ralloc_huge_expand(tsdn, arena, extent, usize))
		    NULL)
			return (true);
	}

	if (chunk_merge_wrapper(tsdn, arena, &chunk_hooks, extent, trail)) {
		arena_extent_dalloc(tsdn, arena, trail);
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks,
		    extent_addr_get(trail), extent_size_get(trail),
		    extent_zeroed_get(trail), extent_committed_get(trail));
		return (true);
	}

	if (zero || (config_fill && unlikely(opt_zero))) {
		bool is_zeroed_chunk = extent_zeroed_get(extent);

		if (!is_zeroed_subchunk) {
			memset((void *)((uintptr_t)ptr + oldsize), 0,
			memset((void *)((uintptr_t)extent_addr_get(extent) +
			    CHUNK_CEILING(oldsize) - oldsize);
			    oldsize), 0, CHUNK_CEILING(oldsize) - oldsize);
		}
		if (!is_zeroed_chunk) {
			memset((void *)((uintptr_t)ptr +
			memset((void *)((uintptr_t)extent_addr_get(extent) +
			    CHUNK_CEILING(oldsize)), 0, usize -
			    CHUNK_CEILING(oldsize));
		}
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldsize),
		    usize - oldsize);
		    JEMALLOC_ALLOC_JUNK, usize - oldsize);
	}

	if (usize < extent_size_get(extent))
		extent_size_set(extent, usize);

	arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldsize);

	return (false);
}
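The expand path needs new chunks starting exactly at the chunk boundary past the current extent (nchunk), and cdiff whole-chunk bytes there, which it then merges in via chunk_merge_wrapper(). Below is a standalone sketch of that arithmetic, with a toy base address and an assumed 2 MiB chunk size.

/*
 * Standalone sketch of the expand-path arithmetic: the new mapping must
 * start at the chunk boundary immediately past the existing extent, and
 * cdiff is how many whole-chunk bytes must be obtained there.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK			((uintptr_t)(2 * 1024 * 1024))	/* assumed */
#define CHUNK_CEILING(s)	(((s) + CHUNK - 1) & ~(CHUNK - 1))

int
main(void)
{
	uintptr_t addr = 0x40000000;		/* extent base (toy value) */
	size_t oldsize = 3 * 1024 * 1024;	/* 3 MiB -> 4 MiB of chunks */
	size_t usize = 9 * 1024 * 1024;		/* 9 MiB -> 10 MiB of chunks */

	/* Mirrors: nchunk = CHUNK_CEILING(extent_past_get(extent)) */
	uintptr_t nchunk = CHUNK_CEILING(addr + oldsize);
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	printf("map %zu bytes at %#lx, then merge with the old extent\n",
	    cdiff, (unsigned long)nchunk);
	return (0);
}
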

bool
huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_min, size_t usize_max, bool zero)
    size_t usize_max, bool zero)
{

	assert(s2u(oldsize) == oldsize);
	assert(s2u(extent_size_get(extent)) == extent_size_get(extent));
	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
	/* Both allocation sizes must be huge to avoid a move. */
	assert(extent_size_get(extent) >= chunksize && usize_max >= chunksize);

	/* Both allocations must be huge to avoid a move. */
	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(extent_size_get(extent))) {
	if (oldsize < chunksize || usize_max < chunksize)
		return (true);

	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
		/* Attempt to expand the allocation in-place. */
		if (!huge_ralloc_no_move_expand(tsdn, extent, ptr, oldsize,
		if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max,
		    usize_max, zero)) {
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return (false);
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
		    CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
		    CHUNK_CEILING(extent_size_get(extent)) &&
		    extent, ptr, oldsize, usize_min, zero)) {
		    huge_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return (false);
		}
@ -278,18 +294,18 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t oldsize,
	 * Avoid moving the allocation if the existing chunk size accommodates
	 * the new size.
	 */
	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
	if (CHUNK_CEILING(extent_size_get(extent)) >= CHUNK_CEILING(usize_min)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
	    && CHUNK_CEILING(extent_size_get(extent)) <=
		huge_ralloc_no_move_similar(tsdn, extent, ptr, oldsize,
	    CHUNK_CEILING(usize_max)) {
		    usize_min, usize_max, zero);
		huge_ralloc_no_move_similar(tsdn, extent, usize_min, usize_max,
		    zero);
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return (false);
	}

	/* Attempt to shrink the allocation in-place. */
	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
	if (CHUNK_CEILING(extent_size_get(extent)) > CHUNK_CEILING(usize_max)) {
		if (!huge_ralloc_no_move_shrink(tsdn, extent, ptr, oldsize,
		if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
		    usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return (false);
		}
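With oldsize now read from the extent itself, huge_ralloc_no_move() reduces to comparing chunk ceilings of the old and requested sizes. Below is a standalone sketch of that three-way decision, assuming 2 MiB chunks; ralloc_plan() is an illustrative helper, not jemalloc code.

/*
 * Standalone sketch of huge_ralloc_no_move()'s decision, driven purely by
 * chunk-ceiling comparisons of the old and new sizes.
 */
#include <stddef.h>
#include <stdio.h>

#define CHUNK			((size_t)(2 * 1024 * 1024))	/* assumed */
#define CHUNK_CEILING(s)	(((s) + CHUNK - 1) & ~(CHUNK - 1))

static const char *
ralloc_plan(size_t oldsize, size_t usize)
{
	if (CHUNK_CEILING(usize) > CHUNK_CEILING(oldsize))
		return ("expand in place (merge a trailing extent)");
	if (CHUNK_CEILING(usize) < CHUNK_CEILING(oldsize))
		return ("shrink in place (split off trailing chunks)");
	return ("same chunk footprint (adjust fill/zero only)");
}

int
main(void)
{
	size_t oldsize = 4 * 1024 * 1024;
	size_t usizes[] = {1 * 1024 * 1024, 4 * 1024 * 1024 - 1,
	    9 * 1024 * 1024};

	for (size_t i = 0; i < sizeof(usizes) / sizeof(usizes[0]); i++) {
		printf("%zu -> %zu: %s\n", oldsize, usizes[i],
		    ralloc_plan(oldsize, usizes[i]));
	}
	return (0);
}
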
@ -308,19 +324,20 @@ huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
}

void *
huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize,
    size_t oldsize, size_t usize, size_t alignment, bool zero, tcache_t *tcache)
    size_t alignment, bool zero, tcache_t *tcache)
{
	void *ret;
	size_t copysize;

	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= HUGE_MAXCLASS);
	/* Both allocation sizes must be huge to avoid a move. */
	assert(extent_size_get(extent) >= chunksize && usize >= chunksize);

	/* Try to avoid moving the allocation. */
	if (!huge_ralloc_no_move(tsdn, extent, ptr, oldsize, usize, usize,
	if (!huge_ralloc_no_move(tsdn, extent, usize, usize, zero))
	    zero))
		return (extent_addr_get(extent));
		return (ptr);

	/*
	 * usize and old size are different enough that we need to use a
@ -331,14 +348,16 @@ huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
	if (ret == NULL)
		return (NULL);

	copysize = (usize < oldsize) ? usize : oldsize;
	copysize = (usize < extent_size_get(extent)) ? usize :
	memcpy(ret, ptr, copysize);
	    extent_size_get(extent);
	isdalloct(tsdn, extent, ptr, oldsize, tcache, true);
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent, extent_addr_get(extent),
	    extent_size_get(extent), tcache, true);
	return (ret);
}

void
huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
huge_dalloc(tsdn_t *tsdn, extent_t *extent)
{
	arena_t *arena;

@ -352,13 +371,13 @@ huge_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr)
	    extent_size_get(extent));
	arena_chunk_dalloc_huge(tsdn, extent_arena_get(extent),
	    extent_addr_get(extent), extent_size_get(extent));
	idalloctm(tsdn, iealloc(tsdn, extent), extent, NULL, true, true);
	arena_extent_dalloc(tsdn, arena, extent);

	arena_decay_tick(tsdn, arena);
}

size_t
huge_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
huge_salloc(tsdn_t *tsdn, const extent_t *extent)
{
	size_t size;
	arena_t *arena;
@ -372,13 +391,11 @@ huge_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
}

prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent)
{
	prof_tctx_t *tctx;
	arena_t *arena;

	assert(extent == iealloc(tsdn, ptr));

	arena = extent_arena_get(extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	tctx = extent_prof_tctx_get(extent);
@ -388,13 +405,10 @@ huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
}

void
huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx)
    prof_tctx_t *tctx)
{
	arena_t *arena;

	assert(extent == iealloc(tsdn, ptr));

	arena = extent_arena_get(extent);
	malloc_mutex_lock(tsdn, &arena->huge_mtx);
	extent_prof_tctx_set(extent, tctx);
@ -402,8 +416,8 @@ huge_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, const void *ptr,
}

void
huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent, const void *ptr)
huge_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent)
{

	huge_prof_tctx_set(tsdn, extent, ptr, (prof_tctx_t *)(uintptr_t)1U);
	huge_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}