diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 2ae4609e..77a7dcb6 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -391,7 +391,8 @@ void	arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
 void	arena_quarantine_junk_small(void *ptr, size_t usize);
 void	*arena_malloc_small(arena_t *arena, size_t size, bool zero);
 void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void	*arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
+void	*arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache);
 void	arena_prof_promoted(const void *ptr, size_t size);
 void	arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
     void *ptr, arena_chunk_map_bits_t *bitselm);
@@ -481,8 +482,7 @@ void	*arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 arena_t	*arena_aalloc(const void *ptr);
 size_t	arena_salloc(const void *ptr, bool demote);
 void	arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
-void	arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
-    tcache_t *tcache);
+void	arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@@ -931,20 +931,22 @@ arena_prof_tctx_get(const void *ptr)
 {
 	prof_tctx_t *ret;
 	arena_chunk_t *chunk;
-	size_t pageind, mapbits;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	mapbits = arena_mapbits_get(chunk, pageind);
-	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-	if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
-		ret = (prof_tctx_t *)(uintptr_t)1U;
-	else
-		ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
+	if (likely(chunk != ptr)) {
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		size_t mapbits = arena_mapbits_get(chunk, pageind);
+		assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
+		if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
+			ret = (prof_tctx_t *)(uintptr_t)1U;
+		else
+			ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
+	} else
+		ret = huge_prof_tctx_get(ptr);
 
 	return (ret);
 }
@@ -953,18 +955,20 @@ JEMALLOC_INLINE void
 arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
 	arena_chunk_t *chunk;
-	size_t pageind;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+	if (likely(chunk != ptr)) {
+		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
 
-	if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
-		arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
+		if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
+			arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
+	} else
+		huge_prof_tctx_set(ptr, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
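Note: the two prof_tctx hunks above replace the flat page-map lookup with a branch on whether ptr is chunk-aligned, falling through to huge_prof_tctx_get()/huge_prof_tctx_set() for huge allocations. For small allocations the getter still returns the sentinel (prof_tctx_t *)(uintptr_t)1U rather than a real profiling context; only large and huge allocations carry their own tctx. A minimal sketch of that sentinel convention (the helper name is hypothetical, not part of the patch):

	/*
	 * Sketch only: 1 is never a valid pointer value, so it can mark
	 * allocations that carry no individually recorded prof_tctx_t.
	 */
	JEMALLOC_INLINE bool
	prof_tctx_is_sentinel(const prof_tctx_t *tctx)
	{

		return (tctx == (const prof_tctx_t *)(uintptr_t)1U);
	}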
@@ -984,7 +988,7 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 				return (NULL);
 			return (arena_malloc_small(arena, size, zero));
 		}
-	} else {
+	} else if (likely(size <= arena_maxclass)) {
 		/*
 		 * Initialize tcache after checking size in order to avoid
 		 * infinite recursion during tcache initialization.
@@ -997,7 +1001,8 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 				return (NULL);
 			return (arena_malloc_large(arena, size, zero));
 		}
-	}
+	} else
+		return (huge_malloc(tsd, arena, size, zero, tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1006,7 +1011,10 @@ arena_aalloc(const void *ptr)
 {
 	arena_chunk_t *chunk;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	return (chunk->node.arena);
+	if (likely(chunk != ptr))
+		return (chunk->node.arena);
+	else
+		return (huge_aalloc(ptr));
 }
 
 /* Return the size of the allocation pointed to by ptr. */
@@ -1022,29 +1030,37 @@ arena_salloc(const void *ptr, bool demote)
 {
 	size_t ret;
 	arena_chunk_t *chunk;
 	size_t pageind;
 	index_t binind;
 
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-	binind = arena_mapbits_binind_get(chunk, pageind);
-	if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
-	    arena_mapbits_large_get(chunk, pageind) != 0))) {
-		/*
-		 * Large allocation. In the common case (demote), and as this
-		 * is an inline function, most callers will only end up looking
-		 * at binind to determine that ptr is a small allocation.
-		 */
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-		ret = arena_mapbits_large_size_get(chunk, pageind);
-		assert(ret != 0);
-		assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
-		assert(arena_mapbits_dirty_get(chunk, pageind) ==
-		    arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
-	} else {
-		/* Small allocation (possibly promoted to a large object). */
-		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
-		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
-		    pageind)) == binind);
-		ret = index2size(binind);
-	}
+	if (likely(chunk != ptr)) {
+		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+		binind = arena_mapbits_binind_get(chunk, pageind);
+		if (unlikely(binind == BININD_INVALID || (config_prof && !demote
+		    && arena_mapbits_large_get(chunk, pageind) != 0))) {
+			/*
+			 * Large allocation. In the common case (demote), and
+			 * as this is an inline function, most callers will only
+			 * end up looking at binind to determine that ptr is a
+			 * small allocation.
+			 */
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+			ret = arena_mapbits_large_size_get(chunk, pageind);
+			assert(ret != 0);
+			assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
+			assert(arena_mapbits_dirty_get(chunk, pageind) ==
+			    arena_mapbits_dirty_get(chunk,
+			    pageind+(ret>>LG_PAGE)-1));
+		} else {
+			/*
+			 * Small allocation (possibly promoted to a large
+			 * object).
+			 */
+			assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
+			    arena_ptr_small_binind_get(ptr,
+			    arena_mapbits_get(chunk, pageind)) == binind);
+			ret = index2size(binind);
+		}
+	} else
+		ret = huge_salloc(ptr);
 
 	return (ret);
 }
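Note: arena_malloc(), arena_aalloc(), and arena_salloc() above all acquire the same shape: round ptr down with CHUNK_ADDR2BASE() and branch on chunk != ptr. This works because chunks are chunksize-aligned and huge allocations occupy whole chunks of their own, so a pointer equal to its chunk base can only be huge, while small and large allocations always sit at a nonzero offset past the chunk header. The idiom in isolation (an illustrative sketch, assuming those alignment invariants):

	/* Classify a pointer the way the inlines above do. */
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (likely(chunk != ptr)) {
		/* Interior pointer: small or large; consult the page map. */
	} else {
		/* Chunk-aligned: huge; consult the huge metadata instead. */
	}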
@@ -1058,75 +1074,83 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 {
 	arena_chunk_t *chunk;
 	size_t pageind, mapbits;
 
 	assert(ptr != NULL);
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (unlikely(chunk == ptr)) {
-		huge_dalloc(tsd, ptr, tcache);
-		return;
-	}
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	mapbits = arena_mapbits_get(chunk, pageind);
-	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-	if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
-		/* Small allocation. */
-		if (likely(tcache != NULL)) {
-			index_t binind = arena_ptr_small_binind_get(ptr,
-			    mapbits);
-			tcache_dalloc_small(tsd, tcache, ptr, binind);
+	if (likely(chunk != ptr)) {
+		pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+		mapbits = arena_mapbits_get(chunk, pageind);
+		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+		if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
+			/* Small allocation. */
+			if (likely(tcache != NULL)) {
+				index_t binind = arena_ptr_small_binind_get(ptr,
+				    mapbits);
+				tcache_dalloc_small(tsd, tcache, ptr, binind);
+			} else {
+				arena_dalloc_small(chunk->node.arena, chunk,
+				    ptr, pageind);
+			}
 		} else {
-			arena_dalloc_small(chunk->node.arena, chunk, ptr,
+			size_t size = arena_mapbits_large_size_get(chunk,
 			    pageind);
+
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+			if (likely(tcache != NULL) && size <= tcache_maxclass)
+				tcache_dalloc_large(tsd, tcache, ptr, size);
+			else {
+				arena_dalloc_large(chunk->node.arena, chunk,
+				    ptr);
+			}
 		}
-	} else {
-		size_t size = arena_mapbits_large_size_get(chunk, pageind);
-
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-
-		if (likely(tcache != NULL) && size <= tcache_maxclass)
-			tcache_dalloc_large(tsd, tcache, ptr, size);
-		else
-			arena_dalloc_large(chunk->node.arena, chunk, ptr);
-	}
+	} else
+		huge_dalloc(tsd, ptr, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
-    tcache_t *tcache)
+arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
+	arena_chunk_t *chunk;
 
-	assert(ptr != NULL);
-	assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
-	if (config_prof && opt_prof) {
-		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-		assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-		if (arena_mapbits_large_get(chunk, pageind) != 0) {
-			/* Make sure to use promoted size, not request size. */
-			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
-			size = arena_mapbits_large_size_get(chunk, pageind);
-		}
-	}
-	assert(s2u(size) == s2u(arena_salloc(ptr, false)));
-
-	if (likely(size <= SMALL_MAXCLASS)) {
-		/* Small allocation. */
-		if (likely(tcache != NULL)) {
-			index_t binind = size2index(size);
-			tcache_dalloc_small(tsd, tcache, ptr, binind);
-		} else {
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	if (likely(chunk != ptr)) {
+		if (config_prof && opt_prof) {
 			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
 			    LG_PAGE;
-			arena_dalloc_small(chunk->node.arena, chunk, ptr,
-			    pageind);
+			assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+			if (arena_mapbits_large_get(chunk, pageind) != 0) {
+				/*
+				 * Make sure to use promoted size, not request
+				 * size.
+				 */
+				assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+				size = arena_mapbits_large_size_get(chunk,
+				    pageind);
+			}
 		}
-	} else {
-		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+		assert(s2u(size) == s2u(arena_salloc(ptr, false)));
 
-		if (likely(tcache != NULL) && size <= tcache_maxclass)
-			tcache_dalloc_large(tsd, tcache, ptr, size);
-		else
-			arena_dalloc_large(chunk->node.arena, chunk, ptr);
-	}
+		if (likely(size <= SMALL_MAXCLASS)) {
+			/* Small allocation. */
+			if (likely(tcache != NULL)) {
+				index_t binind = size2index(size);
+				tcache_dalloc_small(tsd, tcache, ptr, binind);
+			} else {
+				size_t pageind = ((uintptr_t)ptr -
+				    (uintptr_t)chunk) >> LG_PAGE;
+				arena_dalloc_small(chunk->node.arena, chunk,
+				    ptr, pageind);
+			}
+		} else {
+			assert(((uintptr_t)ptr & PAGE_MASK) == 0);
+
+			if (likely(tcache != NULL) && size <= tcache_maxclass)
+				tcache_dalloc_large(tsd, tcache, ptr, size);
+			else {
+				arena_dalloc_large(chunk->node.arena, chunk,
+				    ptr);
+			}
+		}
+	} else
+		huge_dalloc(tsd, ptr, tcache);
 }
 # endif /* JEMALLOC_ARENA_INLINE_B */
 #endif
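Note: arena_dalloc() and arena_sdalloc() keep the tcache fast paths first and absorb huge_dalloc() as the unlikely tail. Stripped of the mapbits plumbing, the routing both functions implement looks like this (a sketch, with the small/large test reduced to a flag for readability):

	if (likely(chunk != ptr)) {
		if (small) {
			if (likely(tcache != NULL))
				tcache_dalloc_small(tsd, tcache, ptr, binind);
			else
				arena_dalloc_small(chunk->node.arena, chunk,
				    ptr, pageind);
		} else if (likely(tcache != NULL) && size <= tcache_maxclass)
			tcache_dalloc_large(tsd, tcache, ptr, size);
		else
			arena_dalloc_large(chunk->node.arena, chunk, ptr);
	} else
		huge_dalloc(tsd, ptr, tcache);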
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index ab93aa52..43276c6c 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -823,18 +823,10 @@ bool	ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 JEMALLOC_ALWAYS_INLINE arena_t *
 iaalloc(const void *ptr)
 {
-	arena_t *arena;
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena = arena_aalloc(ptr);
-	else
-		arena = huge_aalloc(ptr);
-
-	return (arena);
+	return (arena_aalloc(ptr));
 }
 
 /*
@@ -845,20 +837,12 @@ iaalloc(const void *ptr)
 JEMALLOC_ALWAYS_INLINE size_t
 isalloc(const void *ptr, bool demote)
 {
-	size_t ret;
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 	/* Demotion only makes sense if config_prof is true. */
 	assert(config_prof || !demote);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		ret = arena_salloc(ptr, demote);
-	else
-		ret = huge_salloc(ptr);
-
-	return (ret);
+	return (arena_salloc(ptr, demote));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -869,10 +853,7 @@ iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata
 
 	assert(size != 0);
 
-	if (likely(size <= arena_maxclass))
-		ret = arena_malloc(tsd, arena, size, zero, tcache);
-	else
-		ret = huge_malloc(tsd, arena, size, zero, tcache);
+	ret = arena_malloc(tsd, arena, size, zero, tcache);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
 		    config_prof));
@@ -917,21 +898,7 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
 	assert(usize != 0);
 	assert(usize == sa2u(usize, alignment));
 
-	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
-		ret = arena_malloc(tsd, arena, usize, zero, tcache);
-	else {
-		if (likely(usize <= arena_maxclass)) {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
-			ret = arena_palloc(arena, usize, alignment, zero);
-		} else if (likely(alignment <= chunksize))
-			ret = huge_malloc(tsd, arena, usize, zero, tcache);
-		else {
-			ret = huge_palloc(tsd, arena, usize, alignment, zero,
-			    tcache);
-		}
-	}
+	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
@@ -1033,15 +1000,8 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 JEMALLOC_ALWAYS_INLINE void
 isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
-	arena_chunk_t *chunk;
-
 	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena_sdalloc(tsd, chunk, ptr, size, tcache);
-	else
-		huge_dalloc(tsd, ptr, tcache);
+	arena_sdalloc(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -1104,13 +1064,8 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
 		    zero, tcache, arena));
 	}
 
-	if (likely(size <= arena_maxclass)) {
-		return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, tcache));
-	} else {
-		return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, tcache));
-	}
+	return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
+	    tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -1136,10 +1091,7 @@ ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
 		return (true);
 	}
 
-	if (likely(size <= arena_maxclass))
-		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
-	else
-		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
 #endif
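Note: with huge handling folded into the arena inlines, every i*() wrapper above collapses to a single call, so the chunk/size test happens exactly once per operation instead of once in the wrapper and again in the arena function. For example, isalloc() now reduces to roughly the following once inlined (a sketch, not literal compiler output):

	/* isalloc(ptr, demote) after the patch, once inlined: */
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	size_t sz = (likely(chunk != ptr)) ?
	    arena_salloc(ptr, demote) :	/* small or large */
	    huge_salloc(ptr);		/* huge */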
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index b2db6859..f5082438 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -372,34 +372,21 @@ prof_tdata_get(tsd_t *tsd, bool create)
 JEMALLOC_ALWAYS_INLINE prof_tctx_t *
 prof_tctx_get(const void *ptr)
 {
-	prof_tctx_t *ret;
-	arena_chunk_t *chunk;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		ret = arena_prof_tctx_get(ptr);
-	else
-		ret = huge_prof_tctx_get(ptr);
-
-	return (ret);
+	return (arena_prof_tctx_get(ptr));
 }
 
 JEMALLOC_ALWAYS_INLINE void
 prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 {
-	arena_chunk_t *chunk;
 
 	cassert(config_prof);
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena_prof_tctx_set(ptr, tctx);
-	else
-		huge_prof_tctx_set(ptr, tctx);
+	arena_prof_tctx_set(ptr, tctx);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
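Note: prof.h sheds its duplicated chunk checks for the same reason. The remaining hunks, against src/arena.c, demote the old page-alignment allocator to a static helper, arena_palloc_large() (which now picks its own arena via arena_choose()), and add a new arena_palloc() that owns the whole alignment dispatch that ipallocztm() used to inline. The four cases it distinguishes, as a sketch (the caller guarantees usize == sa2u(usize, alignment)):

	if (usize <= SMALL_MAXCLASS && alignment < PAGE) {
		/* A small size class already provides the alignment. */
	} else if (likely(usize <= arena_maxclass)) {
		/* Large, page-or-stronger alignment: arena_palloc_large(). */
	} else if (likely(alignment <= chunksize)) {
		/* Huge: chunk alignment comes for free, huge_malloc(). */
	} else {
		/* Huge with alignment beyond chunksize: huge_palloc(). */
	}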
diff --git a/src/arena.c b/src/arena.c
index 2bd1a2c0..7b441bed 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1714,8 +1714,9 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 }
 
 /* Only handles large allocations that require more than page alignment. */
-void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
+static void *
+arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
+    bool zero)
 {
 	void *ret;
 	size_t alloc_size, leadsize, trailsize;
@@ -1726,6 +1727,10 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 
 	assert((size & PAGE_MASK) == 0);
 
+	arena = arena_choose(tsd, arena);
+	if (unlikely(arena == NULL))
+		return (NULL);
+
 	alignment = PAGE_CEILING(alignment);
 	alloc_size = size + alignment - PAGE;
 
@@ -1783,6 +1788,28 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 	return (ret);
 }
 
+void *
+arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+    bool zero, tcache_t *tcache)
+{
+	void *ret;
+
+	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
+		ret = arena_malloc(tsd, arena, usize, zero, tcache);
+	else {
+		if (likely(usize <= arena_maxclass)) {
+			ret = arena_palloc_large(tsd, arena, usize, alignment,
+			    zero);
+		} else if (likely(alignment <= chunksize))
+			ret = huge_malloc(tsd, arena, usize, zero, tcache);
+		else {
+			ret = huge_palloc(tsd, arena, usize, alignment, zero,
+			    tcache);
+		}
+	}
+	return (ret);
+}
+
 void
 arena_prof_promoted(const void *ptr, size_t size)
 {
@@ -2189,29 +2216,35 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
     bool zero)
 {
 
-	/*
-	 * Avoid moving the allocation if the size class can be left the same.
-	 */
-	if (likely(oldsize <= arena_maxclass)) {
-		if (oldsize <= SMALL_MAXCLASS) {
-			assert(arena_bin_info[size2index(oldsize)].reg_size
-			    == oldsize);
-			if ((size + extra <= SMALL_MAXCLASS && size2index(size +
-			    extra) == size2index(oldsize)) || (size <= oldsize
-			    && size + extra >= oldsize))
-				return (false);
-		} else {
-			assert(size <= arena_maxclass);
-			if (size + extra > SMALL_MAXCLASS) {
-				if (!arena_ralloc_large(ptr, oldsize, size,
-				    extra, zero))
+	if (likely(size <= arena_maxclass)) {
+		/*
+		 * Avoid moving the allocation if the size class can be left the
+		 * same.
+		 */
+		if (likely(oldsize <= arena_maxclass)) {
+			if (oldsize <= SMALL_MAXCLASS) {
+				assert(
+				    arena_bin_info[size2index(oldsize)].reg_size
+				    == oldsize);
+				if ((size + extra <= SMALL_MAXCLASS &&
+				    size2index(size + extra) ==
+				    size2index(oldsize)) || (size <= oldsize &&
+				    size + extra >= oldsize))
 					return (false);
+			} else {
+				assert(size <= arena_maxclass);
+				if (size + extra > SMALL_MAXCLASS) {
+					if (!arena_ralloc_large(ptr, oldsize,
+					    size, extra, zero))
+						return (false);
+				}
 			}
 		}
-	}
 
-	/* Reallocation would require a move. */
-	return (true);
+		/* Reallocation would require a move. */
+		return (true);
+	} else
+		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
 
 void *
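Note: arena_ralloc_no_move() keeps its in-place criteria, but sizes above arena_maxclass now forward to huge_ralloc_no_move() instead of requiring the caller to pre-sort. In-place success still means either the small size class is unchanged, or a large run can be grown or shrunk by arena_ralloc_large(). An illustrative example (the class sizes assume jemalloc's default small size classes; not part of the patch):

	void *p = malloc(100);	/* rounds up to the 112-byte class */
	p = realloc(p, 105);	/* still the 112-byte class: no move */
	p = realloc(p, 200);	/* 224-byte class: must move */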
@@ -2219,52 +2252,67 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     size_t extra, size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
-	size_t copysize;
 
-	/* Try to avoid moving the allocation. */
-	if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
-		return (ptr);
+	if (likely(size <= arena_maxclass)) {
+		size_t copysize;
 
-	/*
-	 * size and oldsize are different enough that we need to move the
-	 * object. In that case, fall back to allocating new space and
-	 * copying.
-	 */
-	if (alignment != 0) {
-		size_t usize = sa2u(size + extra, alignment);
-		if (usize == 0)
-			return (NULL);
-		ret = ipalloct(tsd, usize, alignment, zero, tcache, arena);
-	} else
-		ret = arena_malloc(tsd, arena, size + extra, zero, tcache);
+		/* Try to avoid moving the allocation. */
+		if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+			return (ptr);
 
-	if (ret == NULL) {
-		if (extra == 0)
-			return (NULL);
-		/* Try again, this time without extra. */
+		/*
+		 * size and oldsize are different enough that we need to move
+		 * the object. In that case, fall back to allocating new space
+		 * and copying.
+		 */
 		if (alignment != 0) {
-			size_t usize = sa2u(size, alignment);
+			size_t usize = sa2u(size + extra, alignment);
 			if (usize == 0)
 				return (NULL);
 			ret = ipalloct(tsd, usize, alignment, zero, tcache,
 			    arena);
-		} else
-			ret = arena_malloc(tsd, arena, size, zero, tcache);
+		} else {
+			ret = arena_malloc(tsd, arena, size + extra, zero,
+			    tcache);
+		}
 
-		if (ret == NULL)
-			return (NULL);
+		if (ret == NULL) {
+			if (extra == 0)
+				return (NULL);
+			/* Try again, this time without extra. */
+			if (alignment != 0) {
+				size_t usize = sa2u(size, alignment);
+				if (usize == 0)
+					return (NULL);
+				ret = ipalloct(tsd, usize, alignment, zero,
+				    tcache, arena);
+			} else {
+				ret = arena_malloc(tsd, arena, size, zero,
+				    tcache);
+			}
+
+			if (ret == NULL)
+				return (NULL);
+		}
+
+		/*
+		 * Junk/zero-filling were already done by
+		 * ipalloc()/arena_malloc().
+		 */
+
+		/*
+		 * Copy at most size bytes (not size+extra), since the caller
+		 * has no expectation that the extra bytes will be reliably
+		 * preserved.
+		 */
+		copysize = (size < oldsize) ? size : oldsize;
+		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+		memcpy(ret, ptr, copysize);
+		isqalloc(tsd, ptr, oldsize, tcache);
+	} else {
+		ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
+		    alignment, zero, tcache);
 	}
-
-	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
-
-	/*
-	 * Copy at most size bytes (not size+extra), since the caller has no
-	 * expectation that the extra bytes will be reliably preserved.
-	 */
-	copysize = (size < oldsize) ? size : oldsize;
-	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
-	memcpy(ret, ptr, copysize);
-	isqalloc(tsd, ptr, oldsize, tcache);
 	return (ret);
 }
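Note: taken together, the patch makes the small/large/huge split an internal concern of the arena functions; callers such as iallocztm(), isalloc(), and iralloct() no longer need to know where arena_maxclass ends. The tier boundaries used consistently across the hunks above are (sketch):

	if (size <= SMALL_MAXCLASS) {
		/* small: bin-backed, tcache fast path */
	} else if (size <= arena_maxclass) {
		/* large: page runs, tcache up to tcache_maxclass */
	} else {
		/* huge: whole chunks, handled by the huge_*() functions */
	}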