Add support for sized deallocation.

This adds a new `sdallocx` function to the external API, allowing the size to be passed by the caller. It avoids some extra reads in the thread cache fast path. In the case where stats are enabled, this avoids the work of calculating the size from the pointer.

An assertion validates the size that's passed in, so enabling debugging will allow users of the API to debug cases where an incorrect size is passed in.

The performance win for a contrived microbenchmark doing an allocation and immediately freeing it is ~10%. It may have a different impact on a real workload.

Closes #28
commit 4cfe55166e
parent c3f8650749
committed by Jason Evans
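For callers, the new entry point pairs with the existing `*allocx` family: allocate with `mallocx`, free with `sdallocx`, passing the same size. A minimal usage sketch (assuming a build where the public API is unprefixed, as is the default on Linux):

```c
#include <jemalloc/jemalloc.h>

int main(void) {
    size_t size = 100;

    /* Allocate through the extended API. */
    void *p = mallocx(size, 0);
    if (p == NULL)
        return 1;

    /* Free with the size the caller already tracks, so jemalloc can
     * derive the size class without reading allocation metadata. */
    sdallocx(p, size, 0);
    return 0;
}
```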
include/jemalloc/internal/arena.h:

```diff
@@ -488,6 +488,7 @@ void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
 void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
 size_t arena_salloc(const void *ptr, bool demote);
 void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
+void arena_sdalloc(arena_chunk_t *chunk, void *ptr, size_t size, bool try_tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@@ -1139,9 +1140,7 @@ arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
 	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
 		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
-			size_t binind;
-
-			binind = arena_ptr_small_binind_get(ptr, mapbits);
+			size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
 			tcache_dalloc_small(tcache, ptr, binind);
 		} else
 			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
@@ -1157,6 +1156,34 @@ arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
 			arena_dalloc_large(chunk->arena, chunk, ptr);
 	}
 }
 
+JEMALLOC_ALWAYS_INLINE void
+arena_sdalloc(arena_chunk_t *chunk, void *ptr, size_t size, bool try_tcache)
+{
+	tcache_t *tcache;
+
+	assert(ptr != NULL);
+	assert(CHUNK_ADDR2BASE(ptr) != ptr);
+
+	if (size < PAGE) {
+		/* Small allocation. */
+		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
+			size_t binind = small_size2bin(size);
+			tcache_dalloc_small(tcache, ptr, binind);
+		} else {
+			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
+		}
+	} else {
+		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+		if (try_tcache && size <= tcache_maxclass && (tcache =
+		    tcache_get(false)) != NULL) {
+			tcache_dalloc_large(tcache, ptr, size);
+		} else
+			arena_dalloc_large(chunk->arena, chunk, ptr);
+	}
+}
+
 # endif /* JEMALLOC_ARENA_INLINE_C */
 #endif
```
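Two things are worth noting about `arena_sdalloc`. First, the `size < PAGE` test is what makes the caller-supplied size sufficient: small size classes are sub-page, so the size alone selects the small path, and `small_size2bin(size)` yields the bin index without touching the chunk's page map, which `arena_dalloc` above has to read. Second, nothing on this fast path cross-checks the size against the allocation's metadata; that is what the debug-build assertion mentioned in the commit message is for. A sketch of the failure mode it catches (assumes jemalloc configured with `--enable-debug`):

```c
#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = mallocx(100, 0);  /* lands in a small size class */

    /* Passing a size that selects a different size class -- or, as 4096
     * would here, the large path -- is invalid. With --enable-debug an
     * assertion fires; a production build would silently corrupt
     * allocator state instead. */
    sdallocx(p, 4096, 0);
    return 0;
}
```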
include/jemalloc/internal/jemalloc_internal.h.in:

```diff
@@ -634,8 +634,10 @@ size_t ivsalloc(const void *ptr, bool demote);
 size_t u2rz(size_t usize);
 size_t p2rz(const void *ptr);
 void idalloct(void *ptr, bool try_tcache);
+void isdalloct(void *ptr, size_t size, bool try_tcache);
 void idalloc(void *ptr);
 void iqalloc(void *ptr, bool try_tcache);
+void isqalloc(void *ptr, size_t size, bool try_tcache);
 void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
     arena_t *arena);
@@ -787,6 +789,20 @@ idalloct(void *ptr, bool try_tcache)
 		huge_dalloc(ptr);
 }
 
+JEMALLOC_ALWAYS_INLINE void
+isdalloct(void *ptr, size_t size, bool try_tcache)
+{
+	arena_chunk_t *chunk;
+
+	assert(ptr != NULL);
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (chunk != ptr)
+		arena_sdalloc(chunk, ptr, size, try_tcache);
+	else
+		huge_dalloc(ptr);
+}
+
 JEMALLOC_ALWAYS_INLINE void
 idalloc(void *ptr)
 {
@@ -804,6 +820,16 @@ iqalloc(void *ptr, bool try_tcache)
 		idalloct(ptr, try_tcache);
 }
 
+JEMALLOC_ALWAYS_INLINE void
+isqalloc(void *ptr, size_t size, bool try_tcache)
+{
+
+	if (config_fill && opt_quarantine)
+		quarantine(ptr);
+	else
+		isdalloct(ptr, size, try_tcache);
+}
+
 JEMALLOC_ALWAYS_INLINE void *
 iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
     size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
```
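The `src/jemalloc.c` side of the commit is not shown in this excerpt, so the body of the exported function is omitted here. As a rough, hypothetical sketch of how the entry point presumably ties the new helpers together; only the `isqalloc` signature and the exported prototype below come from the diff, and the real version also handles `MALLOCX_*` flags and carries the size-validating assertion the commit message mentions:

```c
/* Hypothetical sketch -- not the commit's actual definition. */
void
je_sdallocx(void *ptr, size_t size, int flags)
{
    (void)flags;  /* flag handling elided in this sketch */

    assert(ptr != NULL);

    /* Thread the caller-supplied size through the quarantine-aware
     * wrapper down to arena_sdalloc(). */
    isqalloc(ptr, size, true);
}
```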
include/jemalloc/internal/private_symbols.txt:

```diff
@@ -61,6 +61,7 @@ arena_ralloc_no_move
 arena_redzone_corruption
 arena_run_regind
 arena_salloc
+arena_sdalloc
 arena_stats_merge
 arena_tcache_fill_small
 arenas
@@ -228,7 +229,9 @@ iralloc
 iralloct
 iralloct_realign
 isalloc
+isdalloct
 isthreaded
+isqalloc
 ivsalloc
 ixalloc
 jemalloc_postfork_child
```
include/jemalloc/jemalloc_protos.h.in:

```diff
@@ -25,6 +25,7 @@ JEMALLOC_EXPORT size_t @je_@xallocx(void *ptr, size_t size, size_t extra,
 JEMALLOC_EXPORT size_t @je_@sallocx(const void *ptr, int flags)
     JEMALLOC_ATTR(pure);
 JEMALLOC_EXPORT void @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void @je_@sdallocx(void *ptr, size_t size, int flags);
 JEMALLOC_EXPORT size_t @je_@nallocx(size_t size, int flags)
     JEMALLOC_ATTR(pure);
 
```
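A practical question the prototype raises is which `size` values are legal. The documented contract in later jemalloc releases is that `size` must lie between the originally requested size and `nallocx()` of that request; any value in that range maps to the same size class, which is all `arena_sdalloc` needs. Treating that as an assumption for this early commit, either of these calls would be valid:

```c
#include <jemalloc/jemalloc.h>

int main(void) {
    size_t req = 100;
    void *p = mallocx(req, 0);
    if (p == NULL)
        return 1;

    /* The requested size and the usable size round to the same small
     * bin, so either is an acceptable argument. */
    size_t usable = nallocx(req, 0);  /* usable size for a 100-byte request */
    sdallocx(p, usable, 0);           /* sdallocx(p, req, 0) would work too */
    return 0;
}
```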