Normalize size classes.
Normalize size classes to use the same number of size classes per size doubling (currently hard-coded to 4) across the entire range of size classes. Small size classes already used this spacing, but in order to support this change, additional small size classes now fill [4 KiB .. 16 KiB). Large size classes range from [16 KiB .. 4 MiB). Huge size classes now support non-multiples of the chunk size in order to fill (4 MiB .. 16 MiB).
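For intuition, the standalone sketch below (illustrative only, not part of this commit; the constants and enumeration range are assumptions for a default 4 KiB page / 4 MiB chunk configuration) mirrors the grp/delta arithmetic of the new index2size_compute() in the diff, printing the 2^LG_SIZE_CLASS_GROUP = 4 classes in each size doubling from the 4 KiB group through 16 MiB, e.g. 20 KiB, 24 KiB, 28 KiB, 32 KiB between 16 KiB and 32 KiB, and 5 MiB, 6 MiB, 7 MiB, 8 MiB between 4 MiB and 8 MiB.

/*
 * Illustrative only: enumerate size classes spaced at 2^LG_SIZE_CLASS_GROUP
 * (here 4) classes per size doubling, the spacing this commit applies
 * uniformly to small, large, and huge classes above the quantum-spaced range.
 */
#include <stdio.h>
#include <stddef.h>

#define LG_SIZE_CLASS_GROUP	2	/* 4 classes per doubling. */

int
main(void)
{
	size_t lg_base, i;

	/* Walk the groups whose upper bounds run from 8 KiB to 16 MiB. */
	for (lg_base = 12; lg_base < 24; lg_base++) {
		size_t base = (size_t)1 << lg_base;
		size_t delta = base >> LG_SIZE_CLASS_GROUP;

		for (i = 1; i <= ((size_t)1 << LG_SIZE_CLASS_GROUP); i++)
			printf("%zu\n", base + i * delta);
	}
	return (0);
}

Keeping the spacing uniform across small, large, and huge sizes is what allows the single size2index()/index2size() pair to replace the separate small_size2bin()/small_bin2size() helpers removed below.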
@@ -1,6 +1,8 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS)

/* Maximum number of regions in one run. */
#define LG_RUN_MAXREGS (LG_PAGE - LG_TINY_MIN)
#define RUN_MAXREGS (1U << LG_RUN_MAXREGS)
@@ -96,11 +98,15 @@ struct arena_chunk_map_bits_s {
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ----++++ ++++D-LA
*
* Large (sampled, size <= PAGE):
* Large (sampled, size <= LARGE_MINCLASS):
* ssssssss ssssssss ssssnnnn nnnnD-LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ----++++ ++++D-LA
*
* Large (not sampled, size == PAGE):
* Large (not sampled, size == LARGE_MINCLASS):
* ssssssss ssssssss ssss++++ ++++D-LA
* xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
* -------- -------- ----++++ ++++D-LA
*/
size_t bits;
#define CHUNK_MAP_BININD_SHIFT 4
@@ -325,30 +331,21 @@ struct arena_s {
#ifdef JEMALLOC_H_EXTERNS

extern ssize_t opt_lg_dirty_mult;
/*
* small_size2bin_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via small_size2bin().
*/
extern uint8_t const small_size2bin_tab[];
/*
* small_bin2size_tab duplicates information in arena_bin_info, but in a const
* array, for which it is easier for the compiler to optimize repeated
* dereferences.
*/
extern uint32_t const small_bin2size_tab[NBINS];

extern arena_bin_info_t arena_bin_info[NBINS];

/* Number of large size classes. */
#define nlclasses (chunk_npages - map_bias)
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t arena_maxclass; /* Max size class for arenas. */
extern size_t nlclasses; /* Number of large size classes. */

void *arena_chunk_alloc_huge(arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
index_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -403,15 +400,6 @@ void arena_postfork_child(arena_t *arena);
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t small_size2bin_compute(size_t size);
size_t small_size2bin_lookup(size_t size);
size_t small_size2bin(size_t size);
size_t small_bin2size_compute(size_t binind);
size_t small_bin2size_lookup(size_t binind);
size_t small_bin2size(size_t binind);
size_t small_s2u_compute(size_t size);
size_t small_s2u_lookup(size_t size);
size_t small_s2u(size_t size);
arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
size_t pageind);
arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
@@ -426,7 +414,7 @@ size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
@@ -439,16 +427,16 @@ void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind);
index_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, size_t binind, size_t flags);
size_t runind, index_t binind, size_t flags);
void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
@@ -464,148 +452,6 @@ void arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_INLINE size_t
small_size2bin_compute(size_t size)
{
#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
} else
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
size_t grp = shift << LG_SIZE_CLASS_GROUP;

size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

size_t delta_inverse_mask = ZI(-1) << lg_delta;
size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

size_t bin = NTBINS + grp + mod;
return (bin);
}
}

JEMALLOC_ALWAYS_INLINE size_t
small_size2bin_lookup(size_t size)
{

assert(size <= LOOKUP_MAXCLASS);
{
size_t ret = ((size_t)(small_size2bin_tab[(size-1) >>
LG_TINY_MIN]));
assert(ret == small_size2bin_compute(size));
return (ret);
}
}

JEMALLOC_ALWAYS_INLINE size_t
small_size2bin(size_t size)
{

assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (small_size2bin_lookup(size));
else
return (small_size2bin_compute(size));
}

JEMALLOC_INLINE size_t
small_bin2size_compute(size_t binind)
{
#if (NTBINS > 0)
if (binind < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + binind));
else
#endif
{
size_t reduced_binind = binind - NTBINS;
size_t grp = reduced_binind >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_binind & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);

size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;

size_t usize = grp_size + mod_size;
return (usize);
}
}

JEMALLOC_ALWAYS_INLINE size_t
small_bin2size_lookup(size_t binind)
{

assert(binind < NBINS);
{
size_t ret = (size_t)small_bin2size_tab[binind];
assert(ret == small_bin2size_compute(binind));
return (ret);
}
}

JEMALLOC_ALWAYS_INLINE size_t
small_bin2size(size_t binind)
{

return (small_bin2size_lookup(binind));
}

JEMALLOC_ALWAYS_INLINE size_t
small_s2u_compute(size_t size)
{
#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
} else
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
}
}

JEMALLOC_ALWAYS_INLINE size_t
small_s2u_lookup(size_t size)
{
size_t ret = small_bin2size(small_size2bin(size));

assert(ret == small_s2u_compute(size));
return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
small_s2u(size_t size)
{

assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (small_s2u_lookup(size));
else
return (small_s2u_compute(size));
}
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
{
@@ -714,11 +560,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> LG_PAGE);
}

JEMALLOC_ALWAYS_INLINE size_t
JEMALLOC_ALWAYS_INLINE index_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
size_t binind;
index_t binind;

mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -810,20 +656,20 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
index_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);

assert(binind <= BININD_INVALID);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
assert(arena_mapbits_large_size_get(chunk, pageind) == LARGE_MINCLASS);
arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
(binind << CHUNK_MAP_BININD_SHIFT));
}

JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
index_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
@@ -893,10 +739,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
}
}

JEMALLOC_ALWAYS_INLINE size_t
JEMALLOC_ALWAYS_INLINE index_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
size_t binind;
index_t binind;

binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;

@@ -908,7 +754,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
size_t actual_binind;
index_t actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
@@ -938,13 +784,13 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)

return (binind);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
# endif /* JEMALLOC_ARENA_INLINE_A */

# ifdef JEMALLOC_ARENA_INLINE_C
JEMALLOC_INLINE size_t
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE index_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
size_t binind = bin - arena->bins;
index_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
@@ -1102,7 +948,8 @@ arena_salloc(const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
size_t pageind, binind;
size_t pageind;
index_t binind;

assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
@@ -1122,10 +969,6 @@ arena_salloc(const void *ptr, bool demote)
ret = arena_mapbits_large_size_get(chunk, pageind);
assert(ret != 0);
assert(pageind + (ret>>LG_PAGE) <= chunk_npages);
assert(ret == PAGE || arena_mapbits_large_size_get(chunk,
pageind+(ret>>LG_PAGE)-1) == 0);
assert(binind == arena_mapbits_binind_get(chunk,
pageind+(ret>>LG_PAGE)-1));
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1));
} else {
@@ -1133,7 +976,7 @@ arena_salloc(const void *ptr, bool demote)
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) == binind);
ret = small_bin2size(binind);
ret = index2size(binind);
}

return (ret);
@@ -1155,7 +998,7 @@ arena_dalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, bool try_tcache)
/* Small allocation. */
if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
false)) != NULL)) {
size_t binind = arena_ptr_small_binind_get(ptr,
index_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tcache, ptr, binind);
} else
@@ -1186,7 +1029,7 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
/* Small allocation. */
if (likely(try_tcache) && likely((tcache = tcache_get(tsd,
false)) != NULL)) {
size_t binind = small_size2bin(size);
index_t binind = size2index(size);
tcache_dalloc_small(tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
@@ -1203,7 +1046,7 @@ arena_sdalloc(tsd_t *tsd, arena_chunk_t *chunk, void *ptr, size_t size,
arena_dalloc_large(chunk->arena, chunk, ptr);
}
}
# endif /* JEMALLOC_ARENA_INLINE_C */
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif

#endif /* JEMALLOC_H_INLINES */

@@ -40,9 +40,6 @@ extern rtree_t *chunks_rtree;
extern size_t chunksize;
extern size_t chunksize_mask; /* (chunksize - 1). */
extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxclass; /* Max size class for arenas. */

void *chunk_alloc_base(size_t size);
void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,

@@ -10,7 +10,7 @@
#ifdef JEMALLOC_H_EXTERNS

void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero);
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
bool zero);
bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);

@@ -165,6 +165,9 @@ static const bool config_ivsalloc =

#include "jemalloc/internal/jemalloc_internal_macros.h"

/* Size class index type. */
typedef unsigned index_t;

#define MALLOCX_ARENA_MASK ((int)~0xff)
#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
@@ -397,6 +400,18 @@ extern arena_t **arenas;
extern unsigned narenas_total;
extern unsigned narenas_auto; /* Read-only after initialization. */

/*
* index2size_tab encodes the same information as could be computed (at
* unacceptable cost in some code paths) by index2size_compute().
*/
extern size_t const index2size_tab[NSIZES];
/*
* size2index_tab is a compact lookup table that rounds request sizes up to
* size classes. In order to reduce cache footprint, the table is compressed,
* and all accesses are via size2index().
*/
extern uint8_t const size2index_tab[];

arena_t *arenas_extend(unsigned ind);
arena_t *choose_arena_hard(tsd_t *tsd);
void thread_allocated_cleanup(tsd_t *tsd);
@@ -449,15 +464,15 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

/*
* Include arena.h the first time in order to provide inline functions for this
* header's inlines.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A

#ifndef JEMALLOC_ENABLE_INLINE
index_t size2index_compute(size_t size);
index_t size2index_lookup(size_t size);
index_t size2index(size_t size);
size_t index2size_compute(index_t index);
size_t index2size_lookup(index_t index);
size_t index2size(index_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
unsigned narenas_total_get(void);
@@ -465,6 +480,135 @@ arena_t *choose_arena(tsd_t *tsd, arena_t *arena);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE index_t
size2index_compute(size_t size)
{

#if (NTBINS != 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
} else
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
size_t grp = shift << LG_SIZE_CLASS_GROUP;

size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

size_t delta_inverse_mask = ZI(-1) << lg_delta;
size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

size_t index = NTBINS + grp + mod;
return (index);
}
}

JEMALLOC_ALWAYS_INLINE index_t
size2index_lookup(size_t size)
{

assert(size <= LOOKUP_MAXCLASS);
{
size_t ret = ((size_t)(size2index_tab[(size-1) >>
LG_TINY_MIN]));
assert(ret == size2index_compute(size));
return (ret);
}
}

JEMALLOC_ALWAYS_INLINE index_t
size2index(size_t size)
{

assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (size2index_lookup(size));
else
return (size2index_compute(size));
}

JEMALLOC_INLINE size_t
index2size_compute(index_t index)
{

#if (NTBINS > 0)
if (index < NTBINS)
return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
else
#endif
{
size_t reduced_index = index - NTBINS;
size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
1);

size_t grp_size_mask = ~((!!grp)-1);
size_t grp_size = ((ZU(1) << (LG_QUANTUM +
(LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

size_t shift = (grp == 0) ? 1 : grp;
size_t lg_delta = shift + (LG_QUANTUM-1);
size_t mod_size = (mod+1) << lg_delta;

size_t usize = grp_size + mod_size;
return (usize);
}
}

JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(index_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
index2size(index_t index)
{

assert(index < NSIZES);
return (index2size_lookup(index));
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

#if (NTBINS > 0)
if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
size_t lg_ceil = lg_floor(pow2_ceil(size));
return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
(ZU(1) << lg_ceil));
} else
#endif
{
size_t x = lg_floor((size<<1)-1);
size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
size_t delta = ZU(1) << lg_delta;
size_t delta_mask = delta - 1;
size_t usize = (size + delta_mask) & ~delta_mask;
return (usize);
}
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
size_t ret = index2size_lookup(size2index_lookup(size));

assert(ret == s2u_compute(size));
return (ret);
}

/*
* Compute usable size that would result from allocating an object with the
* specified size.
@@ -473,11 +617,11 @@ JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

if (size <= SMALL_MAXCLASS)
return (small_s2u(size));
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
assert(size > 0);
if (likely(size <= LOOKUP_MAXCLASS))
return (s2u_lookup(size));
else
return (s2u_compute(size));
}

/*
@@ -491,71 +635,78 @@ sa2u(size_t size, size_t alignment)

assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each small
* size class, every object is aligned at the smallest power of two
* that is non-zero in the base two representation of the size. For
* example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = ALIGNMENT_CEILING(size, alignment);
/*
* (usize < size) protects against the combination of maximal
* alignment and size greater than maximal alignment.
*/
if (usize < size) {
/* size_t overflow. */
return (0);
/* Try for a small size class. */
if (size <= SMALL_MAXCLASS && alignment < PAGE) {
/*
* Round size up to the nearest multiple of alignment.
*
* This done, we can take advantage of the fact that for each
* small size class, every object is aligned at the smallest
* power of two that is non-zero in the base two representation
* of the size. For example:
*
* Size | Base 2 | Minimum alignment
* -----+----------+------------------
* 96 | 1100000 | 32
* 144 | 10100000 | 32
* 192 | 11000000 | 64
*/
usize = s2u(ALIGNMENT_CEILING(size, alignment));
if (usize < LARGE_MINCLASS)
return (usize);
}

if (usize <= arena_maxclass && alignment <= PAGE) {
if (usize <= SMALL_MAXCLASS)
return (small_s2u(usize));
return (PAGE_CEILING(usize));
} else {
size_t run_size;

/* Try for a large size class. */
if (size <= arena_maxclass && alignment < chunksize) {
/*
* We can't achieve subpage alignment, so round up alignment
* permanently; it makes later calculations simpler.
* to the minimum that can actually be supported.
*/
alignment = PAGE_CEILING(alignment);
usize = PAGE_CEILING(size);
/*
* (usize < size) protects against very large sizes within
* PAGE of SIZE_T_MAX.
*
* (usize + alignment < usize) protects against the
* combination of maximal alignment and usize large enough
* to cause overflow. This is similar to the first overflow
* check above, but it needs to be repeated due to the new
* usize value, which may now be *equal* to maximal
* alignment, whereas before we only detected overflow if the
* original size was *greater* than maximal alignment.
*/
if (usize < size || usize + alignment < usize) {
/* size_t overflow. */
return (0);
}

/* Make sure result is a large size class. */
usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
* If the run wouldn't fit within a chunk, round up to a huge
* allocation size.
*/
run_size = usize + alignment - PAGE;
if (run_size <= arena_maxclass)
return (PAGE_CEILING(usize));
return (CHUNK_CEILING(usize));
if (usize + alignment - PAGE <= arena_maxrun)
return (usize);
}

/* Huge size class. Beware of size_t overflow. */

/*
* We can't achieve subchunk alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = CHUNK_CEILING(alignment);
if (alignment == 0) {
/* size_t overflow. */
return (0);
}

/* Make sure result is a huge size class. */
if (size <= chunksize)
usize = chunksize;
else {
usize = s2u(size);
if (usize < size) {
/* size_t overflow. */
return (0);
}
}

/*
* Calculate the multi-chunk mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + alignment - PAGE < usize) {
/* size_t overflow. */
return (0);
}
return (usize);
}

JEMALLOC_INLINE unsigned
@@ -591,16 +742,16 @@ choose_arena(tsd_t *tsd, arena_t *arena)
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/rtree.h"
/*
* Include arena.h the second and third times in order to resolve circular
* dependencies with tcache.h.
* Include portions of arena.h interleaved with tcache.h in order to resolve
* circular dependencies.
*/
#define JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/tcache.h"
#define JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_C
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

@@ -678,7 +829,7 @@ ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero, bool try_tcache,
assert(usize != 0);
assert(usize == sa2u(usize, alignment));

if (usize <= arena_maxclass && alignment <= PAGE)
if (usize <= SMALL_MAXCLASS && alignment < PAGE)
ret = arena_malloc(tsd, arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
@@ -742,7 +893,7 @@ u2rz(size_t usize)
size_t ret;

if (usize <= SMALL_MAXCLASS) {
size_t binind = small_size2bin(usize);
index_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;

@@ -41,6 +41,7 @@ arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxclass
arena_maxrun
arena_miscelm_get
arena_miscelm_to_pageind
arena_miscelm_to_rpages
@@ -216,6 +217,10 @@ idalloct
imalloc
imalloct
in_valgrind
index2size
index2size_compute
index2size_lookup
index2size_tab
ipalloc
ipalloct
iqalloc
@@ -338,19 +343,14 @@ rtree_postfork_parent
rtree_prefork
rtree_set
s2u
s2u_compute
s2u_lookup
sa2u
set_errno
small_bin2size
small_bin2size_compute
small_bin2size_lookup
small_bin2size_tab
small_s2u
small_s2u_compute
small_s2u_lookup
small_size2bin
small_size2bin_compute
small_size2bin_lookup
small_size2bin_tab
size2index
size2index_compute
size2index_lookup
size2index_tab
stats_cactive
stats_cactive_add
stats_cactive_get

@@ -61,7 +61,7 @@ size_class() {
rem="yes"
fi

if [ ${lg_size} -lt ${lg_p} ] ; then
if [ ${lg_size} -lt $((${lg_p} + ${lg_g})) ] ; then
bin="yes"
else
bin="no"
@@ -159,6 +159,7 @@ size_classes() {
nbins=$((${index} + 1))
# Final written value is correct:
small_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
lg_large_minclass=$((${lg_grp} + 1))
fi
index=$((${index} + 1))
ndelta=$((${ndelta} + 1))
@@ -167,14 +168,17 @@ size_classes() {
lg_delta=$((${lg_delta} + 1))
done
echo
nsizes=${index}

# Defined upon completion:
# - ntbins
# - nlbins
# - nbins
# - nsizes
# - lg_tiny_maxclass
# - lookup_maxclass
# - small_maxclass
# - lg_large_minclass
}

cat <<EOF
@@ -199,10 +203,11 @@ cat <<EOF
* NTBINS: Number of tiny bins.
* NLBINS: Number of bins supported by the lookup table.
* NBINS: Number of small size class bins.
* NSIZES: Number of size classes.
* LG_TINY_MAXCLASS: Lg of maximum tiny size class.
* LOOKUP_MAXCLASS: Maximum size class included in lookup table.
* SMALL_MAXCLASS: Maximum small size class.
* LARGE_MINCLASS: Minimum large size class.
* LG_LARGE_MINCLASS: Lg of minimum large size class.
*/

#define LG_SIZE_CLASS_GROUP ${lg_g}
@@ -221,9 +226,11 @@ for lg_z in ${lg_zarr} ; do
echo "#define NTBINS ${ntbins}"
echo "#define NLBINS ${nlbins}"
echo "#define NBINS ${nbins}"
echo "#define NSIZES ${nsizes}"
echo "#define LG_TINY_MAXCLASS ${lg_tiny_maxclass}"
echo "#define LOOKUP_MAXCLASS ${lookup_maxclass}"
echo "#define SMALL_MAXCLASS ${small_maxclass}"
echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
echo "#endif"
echo
done
@@ -238,7 +245,7 @@ cat <<EOF
#endif
#undef SIZE_CLASSES_DEFINED
/*
* The small_size2bin lookup table uses uint8_t to encode each bin index, so we
* The size2index_tab lookup table uses uint8_t to encode each bin index, so we
* cannot support more than 256 small size classes. Further constrain NBINS to
* 255 since all small size classes, plus a "not small" size class must be
* stored in 8 bits of arena_chunk_map_bits_t's bits field.
@@ -247,8 +254,6 @@ cat <<EOF
# error "Too many small size classes"
#endif

#define LARGE_MINCLASS (PAGE_CEILING(SMALL_MAXCLASS+1))

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

@@ -106,12 +106,7 @@ struct arena_stats_s {
uint64_t ndalloc_huge;
uint64_t nrequests_huge;

/*
* One element for each possible size class, including sizes that
* overlap with bin size classes. This is necessary because ipalloc()
* sometimes has to use such large objects in order to assure proper
* alignment.
*/
/* One element for each large size class. */
malloc_large_stats_t *lstats;
};

@@ -72,7 +72,7 @@ struct tcache_s {
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */
arena_t *arena; /* This thread's arena. */
unsigned ev_cnt; /* Event count since incremental GC. */
unsigned next_gc_bin; /* Next bin to GC. */
index_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
@@ -103,10 +103,10 @@ extern size_t tcache_maxclass;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tcache_t *tcache);
void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin,
size_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
index_t binind);
void tcache_bin_flush_small(tcache_bin_t *tbin, index_t binind, unsigned rem,
tcache_t *tcache);
void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
void tcache_bin_flush_large(tcache_bin_t *tbin, index_t binind, unsigned rem,
tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_dissociate(tcache_t *tcache);
@@ -130,7 +130,7 @@ void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin);
void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero);
void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind);
void tcache_dalloc_small(tcache_t *tcache, void *ptr, index_t binind);
void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size);
#endif

@@ -233,20 +233,21 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
size_t binind;
index_t binind;
size_t usize;
tcache_bin_t *tbin;

binind = small_size2bin(size);
binind = size2index(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
size = small_bin2size(binind);
usize = index2size(binind);
ret = tcache_alloc_easy(tbin);
if (unlikely(ret == NULL)) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
if (ret == NULL)
return (NULL);
}
assert(tcache_salloc(ret) == size);
assert(tcache_salloc(ret) == usize);

if (likely(!zero)) {
if (config_fill) {
@@ -254,20 +255,20 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
} else if (unlikely(opt_zero))
memset(ret, 0, size);
memset(ret, 0, usize);
}
} else {
if (config_fill && unlikely(opt_junk)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
memset(ret, 0, size);
memset(ret, 0, usize);
}

if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += size;
tcache->prof_accumbytes += usize;
tcache_event(tcache);
return (ret);
}
@@ -276,12 +277,13 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
{
void *ret;
size_t binind;
index_t binind;
size_t usize;
tcache_bin_t *tbin;

size = PAGE_CEILING(size);
assert(size <= tcache_maxclass);
binind = NBINS + (size >> LG_PAGE) - 1;
binind = size2index(size);
usize = index2size(binind);
assert(usize <= tcache_maxclass);
assert(binind < nhbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);
@@ -290,11 +292,11 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
ret = arena_malloc_large(tcache->arena, size, zero);
ret = arena_malloc_large(tcache->arena, usize, zero);
if (ret == NULL)
return (NULL);
} else {
if (config_prof && size == PAGE) {
if (config_prof && usize == LARGE_MINCLASS) {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(ret);
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
@@ -305,17 +307,17 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
if (likely(!zero)) {
if (config_fill) {
if (unlikely(opt_junk))
memset(ret, 0xa5, size);
memset(ret, 0xa5, usize);
else if (unlikely(opt_zero))
memset(ret, 0, size);
memset(ret, 0, usize);
}
} else
memset(ret, 0, size);
memset(ret, 0, usize);

if (config_stats)
tbin->tstats.nrequests++;
if (config_prof)
tcache->prof_accumbytes += size;
tcache->prof_accumbytes += usize;
}

tcache_event(tcache);
@@ -323,7 +325,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
}

JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
tcache_dalloc_small(tcache_t *tcache, void *ptr, index_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@@ -349,7 +351,7 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
{
size_t binind;
index_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;

@@ -357,7 +359,7 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(ptr) <= tcache_maxclass);

binind = NBINS + (size >> LG_PAGE) - 1;
binind = size2index(size);

if (config_fill && unlikely(opt_junk))
memset(ptr, 0x5a, size);