Refactor huge_*() calls into arena internals.

Make redirects to the huge_*() API the arena code's responsibility,
since arenas now take responsibility for all allocation sizes.
Jason Evans 2015-02-12 14:06:37 -08:00
parent 1eaf3b6f34
commit 88fef7ceda
4 changed files with 238 additions and 227 deletions
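
The change applies one dispatch idea everywhere: huge allocations are the only ones whose pointer is chunk-aligned, so rounding an address down to its chunk base tells the arena code which path owns it, and the redirect to the huge_*() API can live behind the arena_*() entry points instead of in every caller. A minimal sketch of that idea, using hypothetical names (CHUNK_SIZE, chunk_base(), arena_meta_lookup(), huge_meta_lookup()) rather than the real CHUNK_ADDR2BASE()/arena_salloc()/huge_salloc() internals:

/*
 * Hedged sketch, not jemalloc source: the chunk-base dispatch this commit
 * pushes down into the arena_*() inlines.  All names here are stand-ins.
 */
#include <stdint.h>
#include <stddef.h>

#define CHUNK_SIZE ((uintptr_t)1 << 21)	/* assume 2 MiB chunks */

static inline void *
chunk_base(const void *ptr)
{
	/* Round the pointer down to the start of its chunk. */
	return ((void *)((uintptr_t)ptr & ~(CHUNK_SIZE - 1)));
}

size_t arena_meta_lookup(const void *ptr);	/* hypothetical */
size_t huge_meta_lookup(const void *ptr);	/* hypothetical */

size_t
alloc_size_lookup(const void *ptr)
{
	void *chunk = chunk_base(ptr);

	if (chunk != ptr) {
		/* Small or large: metadata lives in the chunk header. */
		return (arena_meta_lookup(ptr));
	}
	/* Chunk-aligned: a huge allocation owns the whole chunk(s). */
	return (huge_meta_lookup(ptr));
}

The inlines in the diff below use exactly this shape: compute CHUNK_ADDR2BASE(ptr), take the arena path when the result differs from ptr, and fall through to huge_*() otherwise.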


@@ -391,7 +391,8 @@ void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 void	arena_quarantine_junk_small(void *ptr, size_t usize);
 void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void	*arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
+void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache);
 void	arena_prof_promoted(const void *ptr, size_t size);
 void	arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr, arena_chunk_map_bits_t *bitselm);
@@ -481,8 +482,7 @@ void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 arena_t	*arena_aalloc(const void *ptr);
 size_t	arena_salloc(const void *ptr, bool demote);
 void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
-void	arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
-    tcache_t *tcache);
+void	arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@@ -931,20 +931,22 @@ arena_prof_tctx_get(const void *ptr)
 {
 	prof_tctx_t *ret;
 	arena_chunk_t *chunk;
-	size_t pageind, mapbits;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	mapbits = arena_mapbits_get(chunk, pageind);
-	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-	if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
-		ret = (prof_tctx_t *)(uintptr_t)1U;
-	else
-		ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
+	if (likely(chunk != ptr)) {
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		size_t mapbits = arena_mapbits_get(chunk, pageind);
+		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
+			ret = (prof_tctx_t *)(uintptr_t)1U;
+		else
+			ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
+	} else
+		ret = huge_prof_tctx_get(ptr);
 
 	return (ret);
 }
@@ -953,18 +955,20 @@ JEMALLOC_INLINE void
 arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
 	arena_chunk_t *chunk;
-	size_t pageind;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+	if (likely(chunk != ptr)) {
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
 
-	if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
-		arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
+		if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
+			arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
+	} else
+		huge_prof_tctx_set(ptr, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -984,7 +988,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 				return (NULL);
 			return (arena_malloc_small(arena, size, zero));
 		}
-	} else {
+	} else if (likely(size <= arena_maxclass)) {
 		/*
 		 * Initialize tcache after checking size in order to avoid
 		 * infinite recursion during tcache initialization.
@@ -997,7 +1001,8 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 				return (NULL);
 			return (arena_malloc_large(arena, size, zero));
 		}
-	}
+	} else
+		return (huge_malloc(tsd, arena, size, zero, tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1006,7 +1011,10 @@ arena_aalloc(const void *ptr)
 	arena_chunk_t *chunk;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	return (chunk->node.arena);
+	if (likely(chunk != ptr))
+		return (chunk->node.arena);
+	else
+		return (huge_aalloc(ptr));
 }
 
 /* Return the size of the allocation pointed to by ptr. */
@@ -1022,29 +1030,37 @@ arena_salloc(const void *ptr, bool demote)
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-	binind = arena_mapbits_binind_get(chunk, pageind);
-	if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
-	    arena_mapbits_large_get(chunk, pageind) != 0))) {
-		/*
-		 * Large allocation.  In the common case (demote), and as this
-		 * is an inline function, most callers will only end up looking
-		 * at binind to determine that ptr is a small allocation.
-		 */
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-		ret = arena_mapbits_large_size_get(chunk, pageind);
-		assert(ret != 0);
-		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
-		assert(arena_mapbits_dirty_get(chunk, pageind) ==
-		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
-	} else {
-		/* Small allocation (possibly promoted to a large object). */
-		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
-		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
-		    pageind)) == binind);
-		ret = index2size(binind);
-	}
+	if (likely(chunk != ptr)) {
+		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+		binind = arena_mapbits_binind_get(chunk, pageind);
+		if (unlikely(binind == BININD_INVALID || (config_prof && !demote
+		    && arena_mapbits_large_get(chunk, pageind) != 0))) {
+			/*
+			 * Large allocation.  In the common case (demote), and
+			 * as this is an inline function, most callers will only
+			 * end up looking at binind to determine that ptr is a
+			 * small allocation.
+			 */
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+			ret = arena_mapbits_large_size_get(chunk, pageind);
+			assert(ret != 0);
+			assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
+			assert(arena_mapbits_dirty_get(chunk, pageind) ==
+			    arena_mapbits_dirty_get(chunk,
+			    pageind+(ret>>LG_PAGE)-1));
+		} else {
+			/*
+			 * Small allocation (possibly promoted to a large
+			 * object).
+			 */
+			assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+			    arena_ptr_small_binind_get(ptr,
+			    arena_mapbits_get(chunk, pageind)) == binind);
+			ret = index2size(binind);
+		}
+	} else
+		ret = huge_salloc(ptr);
 
 	return (ret);
 }
@@ -1058,75 +1074,83 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (unlikely(chunk == ptr)) {
-		huge_dalloc(tsd, ptr, tcache);
-		return;
-	}
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	mapbits = arena_mapbits_get(chunk, pageind);
-	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-	if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
-		/* Small allocation. */
-		if (likely(tcache != NULL)) {
-			index_t binind = arena_ptr_small_binind_get(ptr,
-			    mapbits);
-			tcache_dalloc_small(tsd, tcache, ptr, binind);
-		} else {
-			arena_dalloc_small(chunk->node.arena, chunk, ptr,
-			    pageind);
-		}
-	} else {
-		size_t size = arena_mapbits_large_size_get(chunk, pageind);
-
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-
-		if (likely(tcache != NULL) && size <= tcache_maxclass)
-			tcache_dalloc_large(tsd, tcache, ptr, size);
-		else
-			arena_dalloc_large(chunk->node.arena, chunk, ptr);
-	}
+	if (likely(chunk != ptr)) {
+		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		mapbits = arena_mapbits_get(chunk, pageind);
+		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
+			/* Small allocation. */
+			if (likely(tcache != NULL)) {
+				index_t binind = arena_ptr_small_binind_get(ptr,
+				    mapbits);
+				tcache_dalloc_small(tsd, tcache, ptr, binind);
+			} else {
+				arena_dalloc_small(chunk->node.arena, chunk,
+				    ptr, pageind);
+			}
+		} else {
+			size_t size = arena_mapbits_large_size_get(chunk,
+			    pageind);
+
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+			if (likely(tcache != NULL) && size <= tcache_maxclass)
+				tcache_dalloc_large(tsd, tcache, ptr, size);
+			else {
+				arena_dalloc_large(chunk->node.arena, chunk,
+				    ptr);
+			}
+		}
+	} else
+		huge_dalloc(tsd, ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
-    tcache_t *tcache)
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
+	arena_chunk_t *chunk;
 
-	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	if (config_prof && opt_prof) {
-		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-		if (arena_mapbits_large_get(chunk, pageind) != 0) {
-			/* Make sure to use promoted size, not request size. */
-			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-			size = arena_mapbits_large_size_get(chunk, pageind);
-		}
-	}
-	assert(s2u(size) == s2u(arena_salloc(ptr, false)));
-
-	if (likely(size <= SMALL_MAXCLASS)) {
-		/* Small allocation. */
-		if (likely(tcache != NULL)) {
-			index_t binind = size2index(size);
-			tcache_dalloc_small(tsd, tcache, ptr, binind);
-		} else {
-			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
-			    LG_PAGE;
-			arena_dalloc_small(chunk->node.arena, chunk, ptr,
-			    pageind);
-		}
-	} else {
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-
-		if (likely(tcache != NULL) && size <= tcache_maxclass)
-			tcache_dalloc_large(tsd, tcache, ptr, size);
-		else
-			arena_dalloc_large(chunk->node.arena, chunk, ptr);
-	}
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (likely(chunk != ptr)) {
+		if (config_prof && opt_prof) {
+			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+			    LG_PAGE;
+			assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+			if (arena_mapbits_large_get(chunk, pageind) != 0) {
+				/*
+				 * Make sure to use promoted size, not request
+				 * size.
+				 */
+				assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+				size = arena_mapbits_large_size_get(chunk,
+				    pageind);
+			}
+		}
+		assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+
+		if (likely(size <= SMALL_MAXCLASS)) {
+			/* Small allocation. */
+			if (likely(tcache != NULL)) {
+				index_t binind = size2index(size);
+				tcache_dalloc_small(tsd, tcache, ptr, binind);
+			} else {
+				size_t pageind = ((uintptr_t)ptr -
+				    (uintptr_t)chunk) >> LG_PAGE;
+				arena_dalloc_small(chunk->node.arena, chunk,
+				    ptr, pageind);
+			}
+		} else {
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+			if (likely(tcache != NULL) && size <= tcache_maxclass)
+				tcache_dalloc_large(tsd, tcache, ptr, size);
+			else {
+				arena_dalloc_large(chunk->node.arena, chunk,
+				    ptr);
+			}
+		}
+	} else
+		huge_dalloc(tsd, ptr, tcache);
 }
 #  endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
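
With the hunks above, arena_malloc() itself now distinguishes small, large, and huge requests (the new `else if (likely(size <= arena_maxclass))` branch), so callers no longer pre-filter by size before deciding between the arena and huge paths. A rough sketch of that three-way routing under assumed thresholds; SMALL_MAX and ARENA_MAX stand in for SMALL_MAXCLASS and arena_maxclass, and the *_alloc() callees are hypothetical:

/* Hedged sketch, not jemalloc source. */
#include <stddef.h>

#define SMALL_MAX ((size_t)14 * 1024)		/* assumption */
#define ARENA_MAX ((size_t)1024 * 1024)		/* assumption */

void *small_alloc(size_t size);	/* hypothetical: bin-based allocation */
void *large_alloc(size_t size);	/* hypothetical: page-run allocation */
void *huge_alloc(size_t size);	/* hypothetical: chunk-granular allocation */

void *
route_alloc(size_t size)
{
	if (size <= SMALL_MAX)
		return (small_alloc(size));
	else if (size <= ARENA_MAX)
		return (large_alloc(size));
	/* Anything past the largest arena-managed class is huge. */
	return (huge_alloc(size));
}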


@@ -823,18 +823,10 @@ bool	ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 JEMALLOC_ALWAYS_INLINE arena_t *
 iaalloc(const void *ptr)
 {
-	arena_t *arena;
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena = arena_aalloc(ptr);
-	else
-		arena = huge_aalloc(ptr);
-
-	return (arena);
+	return (arena_aalloc(ptr));
 }
 
 /*
@@ -845,20 +837,12 @@ iaalloc(const void *ptr)
 JEMALLOC_ALWAYS_INLINE size_t
 isalloc(const void *ptr, bool demote)
 {
-	size_t ret;
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 	/* Demotion only makes sense if config_prof is true. */
 	assert(config_prof || !demote);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		ret = arena_salloc(ptr, demote);
-	else
-		ret = huge_salloc(ptr);
-
-	return (ret);
+	return (arena_salloc(ptr, demote));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -869,10 +853,7 @@ iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata
 
 	assert(size != 0);
 
-	if (likely(size <= arena_maxclass))
-		ret = arena_malloc(tsd, arena, size, zero, tcache);
-	else
-		ret = huge_malloc(tsd, arena, size, zero, tcache);
+	ret = arena_malloc(tsd, arena, size, zero, tcache);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
 		    config_prof));
@@ -917,21 +898,7 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
 	assert(usize != 0);
 	assert(usize == sa2u(usize, alignment));
 
-	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
-		ret = arena_malloc(tsd, arena, usize, zero, tcache);
-	else {
-		if (likely(usize <= arena_maxclass)) {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
-			ret = arena_palloc(arena, usize, alignment, zero);
-		} else if (likely(alignment <= chunksize))
-			ret = huge_malloc(tsd, arena, usize, zero, tcache);
-		else {
-			ret = huge_palloc(tsd, arena, usize, alignment, zero,
-			    tcache);
-		}
-	}
+	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
@@ -1033,15 +1000,8 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 JEMALLOC_ALWAYS_INLINE void
 isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
-	arena_chunk_t *chunk;
 
-	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena_sdalloc(tsd, chunk, ptr, size, tcache);
-	else
-		huge_dalloc(tsd, ptr, tcache);
+	arena_sdalloc(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -1104,13 +1064,8 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
 		    zero, tcache, arena));
 	}
 
-	if (likely(size <= arena_maxclass)) {
-		return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, tcache));
-	} else {
-		return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, tcache));
-	}
+	return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
+	    tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -1136,10 +1091,7 @@ ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
 		return (true);
 	}
 
-	if (likely(size <= arena_maxclass))
-		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
-	else
-		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
 
 #endif
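
After the hunks above, the top-level i*() inlines in this header collapse to plain forwarders; the size check that used to pick between arena_*() and huge_*() is made once, inside the arena layer, instead of being repeated at every call site. A sketch of the resulting shape, with backend_alloc() as a hypothetical stand-in for arena_malloc():

/* Hedged sketch, not jemalloc source. */
#include <stddef.h>

void *backend_alloc(size_t size);	/* hypothetical; does its own size routing */

static inline void *
ialloc(size_t size)
{
	/* No size check here: the backend dispatches small/large/huge. */
	return (backend_alloc(size));
}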


@@ -372,34 +372,21 @@ prof_tdata_get(tsd_t *tsd, bool create)
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
 prof_tctx_get(const void *ptr)
 {
-	prof_tctx_t *ret;
-	arena_chunk_t *chunk;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		ret = arena_prof_tctx_get(ptr);
-	else
-		ret = huge_prof_tctx_get(ptr);
-
-	return (ret);
+	return (arena_prof_tctx_get(ptr));
 }
 
 JEMALLOC_ALWAYS_INLINE void
 prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
-	arena_chunk_t *chunk;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena_prof_tctx_set(ptr, tctx);
-	else
-		huge_prof_tctx_set(ptr, tctx);
+	arena_prof_tctx_set(ptr, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE bool


@@ -1714,8 +1714,9 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 }
 
 /* Only handles large allocations that require more than page alignment. */
-void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+static void *
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero)
 {
 	void *ret;
 	size_t alloc_size, leadsize, trailsize;
@@ -1726,6 +1727,10 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 
 	assert((size & PAGE_MASK) == 0);
 
+	arena = arena_choose(tsd, arena);
+	if (unlikely(arena == NULL))
+		return (NULL);
+
 	alignment = PAGE_CEILING(alignment);
 	alloc_size = size + alignment - PAGE;
 
@@ -1783,6 +1788,28 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 	return (ret);
 }
 
+void *
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+    bool zero, tcache_t *tcache)
+{
+	void *ret;
+
+	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
+		ret = arena_malloc(tsd, arena, usize, zero, tcache);
+	else {
+		if (likely(usize <= arena_maxclass)) {
+			ret = arena_palloc_large(tsd, arena, usize, alignment,
+			    zero);
+		} else if (likely(alignment <= chunksize))
+			ret = huge_malloc(tsd, arena, usize, zero, tcache);
+		else {
+			ret = huge_palloc(tsd, arena, usize, alignment, zero,
+			    tcache);
+		}
+	}
+	return (ret);
+}
+
 void
 arena_prof_promoted(const void *ptr, size_t size)
 {
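
The new arena_palloc() wrapper above centralizes the aligned-allocation routing that ipallocztm() used to perform: small sizes with sub-page alignment go through the ordinary bin path, arena-sized requests get run-level alignment, and huge requests rely on their natural chunk alignment unless an even larger alignment forces the aligned huge path. A sketch of that decision under assumed thresholds (PAGE_SZ, SMALL_MAX, ARENA_MAX, CHUNK_SZ stand in for PAGE, SMALL_MAXCLASS, arena_maxclass, chunksize; all callees are hypothetical):

/* Hedged sketch, not jemalloc source. */
#include <stddef.h>

#define PAGE_SZ   ((size_t)4096)		/* assumption */
#define SMALL_MAX ((size_t)14 * 1024)		/* assumption */
#define ARENA_MAX ((size_t)1024 * 1024)		/* assumption */
#define CHUNK_SZ  ((size_t)2 * 1024 * 1024)	/* assumption */

void *bin_alloc(size_t usize);				/* hypothetical */
void *run_alloc_aligned(size_t usize, size_t align);	/* hypothetical */
void *huge_alloc(size_t usize);				/* hypothetical */
void *huge_alloc_aligned(size_t usize, size_t align);	/* hypothetical */

void *
aligned_route(size_t usize, size_t align)
{
	if (usize <= SMALL_MAX && align < PAGE_SZ) {
		/* Small classes are already sufficiently aligned. */
		return (bin_alloc(usize));
	}
	if (usize <= ARENA_MAX) {
		/* Page-or-larger alignment satisfied within an arena run. */
		return (run_alloc_aligned(usize, align));
	}
	if (align <= CHUNK_SZ) {
		/* Huge allocations are chunk-aligned by construction. */
		return (huge_alloc(usize));
	}
	return (huge_alloc_aligned(usize, align));
}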
@@ -2189,29 +2216,35 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
     bool zero)
 {
 
-	/*
-	 * Avoid moving the allocation if the size class can be left the same.
-	 */
-	if (likely(oldsize <= arena_maxclass)) {
-		if (oldsize <= SMALL_MAXCLASS) {
-			assert(arena_bin_info[size2index(oldsize)].reg_size
-			    == oldsize);
-			if ((size + extra <= SMALL_MAXCLASS && size2index(size +
-			    extra) == size2index(oldsize)) || (size <= oldsize
-			    && size + extra >= oldsize))
-				return (false);
-		} else {
-			assert(size <= arena_maxclass);
-			if (size + extra > SMALL_MAXCLASS) {
-				if (!arena_ralloc_large(ptr, oldsize, size,
-				    extra, zero))
-					return (false);
-			}
-		}
-	}
-
-	/* Reallocation would require a move. */
-	return (true);
+	if (likely(size <= arena_maxclass)) {
+		/*
+		 * Avoid moving the allocation if the size class can be left the
+		 * same.
+		 */
+		if (likely(oldsize <= arena_maxclass)) {
+			if (oldsize <= SMALL_MAXCLASS) {
+				assert(
+				    arena_bin_info[size2index(oldsize)].reg_size
+				    == oldsize);
+				if ((size + extra <= SMALL_MAXCLASS &&
+				    size2index(size + extra) ==
+				    size2index(oldsize)) || (size <= oldsize &&
+				    size + extra >= oldsize))
+					return (false);
+			} else {
+				assert(size <= arena_maxclass);
+				if (size + extra > SMALL_MAXCLASS) {
+					if (!arena_ralloc_large(ptr, oldsize,
+					    size, extra, zero))
+						return (false);
+				}
+			}
+		}
+
+		/* Reallocation would require a move. */
+		return (true);
+	} else
+		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
 
 void *
@@ -2219,52 +2252,67 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
-	size_t copysize;
 
-	/* Try to avoid moving the allocation. */
-	if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
-		return (ptr);
-
-	/*
-	 * size and oldsize are different enough that we need to move the
-	 * object.  In that case, fall back to allocating new space and
-	 * copying.
-	 */
-	if (alignment != 0) {
-		size_t usize = sa2u(size + extra, alignment);
-		if (usize == 0)
-			return (NULL);
-		ret = ipalloct(tsd, usize, alignment, zero, tcache, arena);
-	} else
-		ret = arena_malloc(tsd, arena, size + extra, zero, tcache);
-
-	if (ret == NULL) {
-		if (extra == 0)
-			return (NULL);
-		/* Try again, this time without extra. */
-		if (alignment != 0) {
-			size_t usize = sa2u(size, alignment);
-			if (usize == 0)
-				return (NULL);
-			ret = ipalloct(tsd, usize, alignment, zero, tcache,
-			    arena);
-		} else
-			ret = arena_malloc(tsd, arena, size, zero, tcache);
-
-		if (ret == NULL)
-			return (NULL);
-	}
-
-	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
-
-	/*
-	 * Copy at most size bytes (not size+extra), since the caller has no
-	 * expectation that the extra bytes will be reliably preserved.
-	 */
-	copysize = (size < oldsize) ? size : oldsize;
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
-	memcpy(ret, ptr, copysize);
-	isqalloc(tsd, ptr, oldsize, tcache);
+	if (likely(size <= arena_maxclass)) {
+		size_t copysize;
+
+		/* Try to avoid moving the allocation. */
+		if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+			return (ptr);
+
+		/*
+		 * size and oldsize are different enough that we need to move
+		 * the object.  In that case, fall back to allocating new space
+		 * and copying.
+		 */
+		if (alignment != 0) {
+			size_t usize = sa2u(size + extra, alignment);
+			if (usize == 0)
+				return (NULL);
+			ret = ipalloct(tsd, usize, alignment, zero, tcache,
+			    arena);
+		} else {
+			ret = arena_malloc(tsd, arena, size + extra, zero,
+			    tcache);
+		}
+
+		if (ret == NULL) {
+			if (extra == 0)
+				return (NULL);
+			/* Try again, this time without extra. */
+			if (alignment != 0) {
+				size_t usize = sa2u(size, alignment);
+				if (usize == 0)
+					return (NULL);
+				ret = ipalloct(tsd, usize, alignment, zero,
+				    tcache, arena);
+			} else {
+				ret = arena_malloc(tsd, arena, size, zero,
+				    tcache);
+			}
+
+			if (ret == NULL)
+				return (NULL);
+		}
+
+		/*
+		 * Junk/zero-filling were already done by
+		 * ipalloc()/arena_malloc().
+		 */
+
+		/*
+		 * Copy at most size bytes (not size+extra), since the caller
+		 * has no expectation that the extra bytes will be reliably
+		 * preserved.
+		 */
+		copysize = (size < oldsize) ? size : oldsize;
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+		memcpy(ret, ptr, copysize);
+		isqalloc(tsd, ptr, oldsize, tcache);
+	} else {
+		ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
+		    alignment, zero, tcache);
+	}
 	return (ret);
 }
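
arena_ralloc() keeps its old non-huge strategy inside the new size guard: first ask arena_ralloc_no_move() to resize in place, and only if that fails allocate a new object, copy min(size, oldsize) bytes, and free the old one. A stripped-down sketch of that strategy with hypothetical helpers (try_resize_in_place(), alloc_new(), free_old()):

/* Hedged sketch, not jemalloc source. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

bool  try_resize_in_place(void *ptr, size_t oldsize, size_t newsize);	/* hypothetical */
void *alloc_new(size_t newsize);					/* hypothetical */
void  free_old(void *ptr, size_t oldsize);				/* hypothetical */

void *
realloc_move_if_needed(void *ptr, size_t oldsize, size_t newsize)
{
	void *ret;
	size_t copysize;

	/* Cheapest option: keep the allocation where it is. */
	if (try_resize_in_place(ptr, oldsize, newsize))
		return (ptr);

	ret = alloc_new(newsize);
	if (ret == NULL)
		return (NULL);

	/* Copy only the bytes both sizes cover, then release the old space. */
	copysize = (newsize < oldsize) ? newsize : oldsize;
	memcpy(ret, ptr, copysize);
	free_old(ptr, oldsize);
	return (ret);
}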