Refactor huge_*() calls into arena internals.

Make redirects to the huge_*() API the arena code's responsibility,
since arenas now take responsibility for all allocation sizes.
This commit is contained in:
Jason Evans 2015-02-12 14:06:37 -08:00
parent 1eaf3b6f34
commit 88fef7ceda
4 changed files with 238 additions and 227 deletions

View File

@ -391,7 +391,8 @@ void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(const void *ptr, size_t size);
void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr, arena_chunk_map_bits_t *bitselm);
@ -481,8 +482,7 @@ void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
arena_t *arena_aalloc(const void *ptr);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
void arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
tcache_t *tcache);
void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@ -931,20 +931,22 @@ arena_prof_tctx_get(const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
size_t pageind, mapbits;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = arena_mapbits_get(chunk, pageind);
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else
ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
} else
ret = huge_prof_tctx_get(ptr);
return (ret);
}
@ -953,18 +955,20 @@ JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
size_t pageind;
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
} else
huge_prof_tctx_set(ptr, tctx);
}
JEMALLOC_ALWAYS_INLINE void *
@ -984,7 +988,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
return (NULL);
return (arena_malloc_small(arena, size, zero));
}
} else {
} else if (likely(size <= arena_maxclass)) {
/*
* Initialize tcache after checking size in order to avoid
* infinite recursion during tcache initialization.
@ -997,7 +1001,8 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
return (NULL);
return (arena_malloc_large(arena, size, zero));
}
}
} else
return (huge_malloc(tsd, arena, size, zero, tcache));
}
JEMALLOC_ALWAYS_INLINE arena_t *
@ -1006,7 +1011,10 @@ arena_aalloc(const void *ptr)
arena_chunk_t *chunk;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
return (chunk->node.arena);
else
return (huge_aalloc(ptr));
}
/* Return the size of the allocation pointed to by ptr. */
@ -1022,29 +1030,37 @@ arena_salloc(const void *ptr, bool demote)
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
binind = arena_mapbits_binind_get(chunk, pageind);
if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
arena_mapbits_large_get(chunk, pageind) != 0))) {
if (unlikely(binind == BININD_INVALID || (config_prof && !demote
&& arena_mapbits_large_get(chunk, pageind) != 0))) {
/*
* Large allocation. In the common case (demote), and as this
* is an inline function, most callers will only end up looking
* at binind to determine that ptr is a small allocation.
* Large allocation. In the common case (demote), and
* as this is an inline function, most callers will only
* end up looking at binind to determine that ptr is a
* small allocation.
*/
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
ret = arena_mapbits_large_size_get(chunk, pageind);
assert(ret != 0);
assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
arena_mapbits_dirty_get(chunk,
pageind+(ret>>LG_PAGE)-1));
} else {
/* Small allocation (possibly promoted to a large object). */
/*
* Small allocation (possibly promoted to a large
* object).
*/
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) == binind);
arena_ptr_small_binind_get(ptr,
arena_mapbits_get(chunk, pageind)) == binind);
ret = index2size(binind);
}
} else
ret = huge_salloc(ptr);
return (ret);
}
@ -1058,12 +1074,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (unlikely(chunk == ptr)) {
huge_dalloc(tsd, ptr, tcache);
return;
}
assert(CHUNK_ADDR2BASE(ptr) != ptr);
if (likely(chunk != ptr)) {
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = arena_mapbits_get(chunk, pageind);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
@ -1074,36 +1085,45 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
arena_dalloc_small(chunk->node.arena, chunk, ptr,
pageind);
arena_dalloc_small(chunk->node.arena, chunk,
ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk, pageind);
size_t size = arena_mapbits_large_size_get(chunk,
pageind);
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass)
tcache_dalloc_large(tsd, tcache, ptr, size);
else
arena_dalloc_large(chunk->node.arena, chunk, ptr);
else {
arena_dalloc_large(chunk->node.arena, chunk,
ptr);
}
}
} else
huge_dalloc(tsd, ptr, tcache);
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
tcache_t *tcache)
arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (arena_mapbits_large_get(chunk, pageind) != 0) {
/* Make sure to use promoted size, not request size. */
/*
* Make sure to use promoted size, not request
* size.
*/
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
size = arena_mapbits_large_size_get(chunk, pageind);
size = arena_mapbits_large_size_get(chunk,
pageind);
}
}
assert(s2u(size) == s2u(arena_salloc(ptr, false)));
@ -1114,20 +1134,24 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
index_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
arena_dalloc_small(chunk->node.arena, chunk, ptr,
pageind);
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(chunk->node.arena, chunk,
ptr, pageind);
}
} else {
assert(((uintptr_t)ptr & PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass)
tcache_dalloc_large(tsd, tcache, ptr, size);
else
arena_dalloc_large(chunk->node.arena, chunk, ptr);
else {
arena_dalloc_large(chunk->node.arena, chunk,
ptr);
}
}
} else
huge_dalloc(tsd, ptr, tcache);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

View File

@ -823,18 +823,10 @@ bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{
arena_t *arena;
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
arena = arena_aalloc(ptr);
else
arena = huge_aalloc(ptr);
return (arena);
return (arena_aalloc(ptr));
}
/*
@ -845,20 +837,12 @@ iaalloc(const void *ptr)
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
ret = arena_salloc(ptr, demote);
else
ret = huge_salloc(ptr);
return (ret);
return (arena_salloc(ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
@ -869,10 +853,7 @@ iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata
assert(size != 0);
if (likely(size <= arena_maxclass))
ret = arena_malloc(tsd, arena, size, zero, tcache);
else
ret = huge_malloc(tsd, arena, size, zero, tcache);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
config_prof));
@ -917,21 +898,7 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
if (usize <= SMALL_MAXCLASS && alignment < PAGE)
ret = arena_malloc(tsd, arena, usize, zero, tcache);
else {
if (likely(usize <= arena_maxclass)) {
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
ret = arena_palloc(arena, usize, alignment, zero);
} else if (likely(alignment <= chunksize))
ret = huge_malloc(tsd, arena, usize, zero, tcache);
else {
ret = huge_palloc(tsd, arena, usize, alignment, zero,
tcache);
}
}
ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
@ -1033,15 +1000,8 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{
arena_chunk_t *chunk;
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
arena_sdalloc(tsd, chunk, ptr, size, tcache);
else
huge_dalloc(tsd, ptr, tcache);
arena_sdalloc(tsd, ptr, size, tcache);
}
JEMALLOC_ALWAYS_INLINE void
@ -1104,13 +1064,8 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
zero, tcache, arena));
}
if (likely(size <= arena_maxclass)) {
return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
alignment, zero, tcache));
} else {
return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
alignment, zero, tcache));
}
return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
tcache));
}
JEMALLOC_ALWAYS_INLINE void *
@ -1136,10 +1091,7 @@ ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
return (true);
}
if (likely(size <= arena_maxclass))
return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
else
return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
#endif

View File

@ -372,34 +372,21 @@ prof_tdata_get(tsd_t *tsd, bool create)
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
prof_tctx_get(const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
ret = arena_prof_tctx_get(ptr);
else
ret = huge_prof_tctx_get(ptr);
return (ret);
return (arena_prof_tctx_get(ptr));
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
cassert(config_prof);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr))
arena_prof_tctx_set(ptr, tctx);
else
huge_prof_tctx_set(ptr, tctx);
}
JEMALLOC_ALWAYS_INLINE bool

View File

@ -1714,8 +1714,9 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
}
/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
static void *
arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
bool zero)
{
void *ret;
size_t alloc_size, leadsize, trailsize;
@ -1726,6 +1727,10 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
assert((size & PAGE_MASK) == 0);
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
alloc_size = size + alignment - PAGE;
@ -1783,6 +1788,28 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
return (ret);
}
void *
arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache)
{

	/*
	 * Dispatch an aligned allocation request to the cheapest path that
	 * can satisfy it, from fastest to slowest:
	 *   - small request, sub-page alignment: ordinary arena allocation
	 *     already guarantees the alignment;
	 *   - fits in an arena run: page-aligned large allocation;
	 *   - huge size, alignment no stricter than a chunk: plain huge
	 *     allocation (chunk-aligned by construction);
	 *   - otherwise: explicitly aligned huge allocation.
	 */
	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
		return (arena_malloc(tsd, arena, usize, zero, tcache));
	if (likely(usize <= arena_maxclass))
		return (arena_palloc_large(tsd, arena, usize, alignment, zero));
	if (likely(alignment <= chunksize))
		return (huge_malloc(tsd, arena, usize, zero, tcache));
	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
}
void
arena_prof_promoted(const void *ptr, size_t size)
{
@ -2189,22 +2216,26 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
if (likely(size <= arena_maxclass)) {
/*
* Avoid moving the allocation if the size class can be left the same.
* Avoid moving the allocation if the size class can be left the
* same.
*/
if (likely(oldsize <= arena_maxclass)) {
if (oldsize <= SMALL_MAXCLASS) {
assert(arena_bin_info[size2index(oldsize)].reg_size
assert(
arena_bin_info[size2index(oldsize)].reg_size
== oldsize);
if ((size + extra <= SMALL_MAXCLASS && size2index(size +
extra) == size2index(oldsize)) || (size <= oldsize
&& size + extra >= oldsize))
if ((size + extra <= SMALL_MAXCLASS &&
size2index(size + extra) ==
size2index(oldsize)) || (size <= oldsize &&
size + extra >= oldsize))
return (false);
} else {
assert(size <= arena_maxclass);
if (size + extra > SMALL_MAXCLASS) {
if (!arena_ralloc_large(ptr, oldsize, size,
extra, zero))
if (!arena_ralloc_large(ptr, oldsize,
size, extra, zero))
return (false);
}
}
@ -2212,6 +2243,8 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
/* Reallocation would require a move. */
return (true);
} else
return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
}
void *
@ -2219,6 +2252,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
if (likely(size <= arena_maxclass)) {
size_t copysize;
/* Try to avoid moving the allocation. */
@ -2226,17 +2261,20 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
return (ptr);
/*
* size and oldsize are different enough that we need to move the
* object. In that case, fall back to allocating new space and
* copying.
* size and oldsize are different enough that we need to move
* the object. In that case, fall back to allocating new space
* and copying.
*/
if (alignment != 0) {
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
ret = ipalloct(tsd, usize, alignment, zero, tcache, arena);
} else
ret = arena_malloc(tsd, arena, size + extra, zero, tcache);
ret = ipalloct(tsd, usize, alignment, zero, tcache,
arena);
} else {
ret = arena_malloc(tsd, arena, size + extra, zero,
tcache);
}
if (ret == NULL) {
if (extra == 0)
@ -2246,25 +2284,35 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
ret = ipalloct(tsd, usize, alignment, zero, tcache,
arena);
} else
ret = arena_malloc(tsd, arena, size, zero, tcache);
ret = ipalloct(tsd, usize, alignment, zero,
tcache, arena);
} else {
ret = arena_malloc(tsd, arena, size, zero,
tcache);
}
if (ret == NULL)
return (NULL);
}
/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
/*
* Junk/zero-filling were already done by
* ipalloc()/arena_malloc().
*/
/*
* Copy at most size bytes (not size+extra), since the caller has no
* expectation that the extra bytes will be reliably preserved.
* Copy at most size bytes (not size+extra), since the caller
* has no expectation that the extra bytes will be reliably
* preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
} else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
alignment, zero, tcache);
}
return (ret);
}