Use huge size class infrastructure for large size classes.
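The most visible effect of the change is on the size-query path: the demote flag disappears from arena_salloc()/isalloc(), and profiling-driven promotion is instead expressed through arena_prof_promote()/arena_dalloc_promoted(). A minimal sketch of the new calling convention, based only on the signatures shown in the diff below; the wrapper function itself and the internal-header include are illustrative assumptions, not part of the commit:

#include "jemalloc/internal/jemalloc_internal.h"

/* Hypothetical helper: query the usable size of an existing allocation. */
static size_t
usable_size(tsdn_t *tsdn, const void *ptr)
{
	extent_t *extent = iealloc(tsdn, ptr);

	/* Previously: isalloc(tsdn, extent, ptr, config_prof). */
	return (isalloc(tsdn, extent, ptr));
}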
@@ -294,7 +294,6 @@ struct arena_s {
dss_prec_t dss_prec;
/* Extant arena chunks. */
ql_head(extent_t) achunks;
@@ -465,9 +464,6 @@ extern const arena_bin_info_t arena_bin_info[NBINS];
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t map_misc_offset;
extern size_t arena_maxrun; /* Max run size for arenas. */
extern size_t large_maxclass; /* Max large size class. */
extern unsigned nlclasses; /* Number of large size classes. */
extern unsigned nhclasses; /* Number of huge size classes. */
#ifdef JEMALLOC_JET
typedef size_t (run_quantize_t)(size_t);
@@ -485,7 +481,8 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
bool cache);
extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena,
size_t usize, size_t alignment, bool *zero);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
bool locked);
void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
extent_t *extent, size_t oldsize);
void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
@@ -508,33 +505,19 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
#else
void arena_dalloc_junk_small(void *ptr, const arena_bin_info_t *bin_info);
#endif
void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
bool zero);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promoted(tsdn_t *tsdn, const extent_t *extent,
const void *ptr, size_t size);
void arena_prof_promote(tsdn_t *tsdn, extent_t *extent, const void *ptr,
size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, extent_t *extent, void *ptr,
tcache_t *tcache, bool slow_path);
void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, extent_t *extent, void *ptr,
arena_chunk_map_bits_t *bitselm);
void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
extent_t *extent, void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
arena_chunk_t *chunk, extent_t *extent, void *ptr);
void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
extent_t *extent, void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
bool arena_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, void *ptr,
size_t oldsize, size_t size, size_t extra, bool zero);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr,
@@ -551,8 +534,7 @@ void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
size_t *nactive, size_t *ndirty, arena_stats_t *astats,
malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
malloc_huge_stats_t *hstats);
malloc_bin_stats_t *bstats, malloc_huge_stats_t *hstats);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
@@ -639,8 +621,7 @@ void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(tsdn_t *tsdn, const void *ptr);
size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
bool demote);
size_t arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
void arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr,
tcache_t *tcache, bool slow_path);
void arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
@@ -1225,7 +1206,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache, size, ind, zero, slow_path));
}
if (likely(size <= tcache_maxclass)) {
return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
return (tcache_alloc_huge(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path));
}
/* (size > tcache_maxclass) case falls through. */
@@ -1244,49 +1225,25 @@ arena_aalloc(tsdn_t *tsdn, const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
arena_salloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
size_t ret;
size_t pageind;
szind_t binind;
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
const arena_chunk_t *chunk =
(const arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
szind_t binind;
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
binind = arena_mapbits_binind_get(chunk, pageind);
if (unlikely(binind == BININD_INVALID || (config_prof && !demote
&& arena_mapbits_large_get(chunk, pageind) != 0))) {
/*
* Large allocation. In the common case (demote), and
* as this is an inline function, most callers will only
* end up looking at binind to determine that ptr is a
* small allocation.
*/
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
ret = arena_mapbits_large_size_get(chunk, pageind) -
large_pad;
assert(ret != 0);
assert(pageind + ((ret+large_pad)>>LG_PAGE) <=
chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk,
pageind+((ret+large_pad)>>LG_PAGE)-1));
} else {
/*
* Small allocation (possibly promoted to a large
* object).
*/
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
arena_ptr_small_binind_get(tsdn, ptr,
arena_mapbits_get(chunk, pageind)) == binind);
ret = index2size(binind);
}
/* Small allocation. */
assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
arena_ptr_small_binind_get(tsdn, ptr,
arena_mapbits_get(chunk, pageind)) == binind);
ret = index2size(binind);
} else
ret = huge_salloc(tsdn, extent);
@@ -1297,49 +1254,40 @@ JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
bool slow_path)
{
size_t pageind, mapbits;
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
/* Small allocation. */
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
mapbits = arena_mapbits_get(chunk, pageind);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind =
arena_ptr_small_binind_get(tsdn, ptr,
mapbits);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
binind, slow_path);
} else {
arena_dalloc_small(tsdn,
extent_arena_get(extent), chunk, extent,
ptr, pageind);
}
assert((mapbits & CHUNK_MAP_LARGE) == 0);
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(tsdn, ptr,
mapbits);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
slow_path);
} else {
size_t size = arena_mapbits_large_size_get(chunk,
pageind);
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size - large_pad, slow_path);
} else {
arena_dalloc_large(tsdn,
extent_arena_get(extent), chunk, extent,
ptr);
}
arena_dalloc_small(tsdn, extent_arena_get(extent),
chunk, extent, ptr, pageind);
}
} else
huge_dalloc(tsdn, extent);
} else {
size_t usize = extent_usize_get(extent);
if (likely(tcache != NULL) && usize <= tcache_maxclass) {
if (config_prof && unlikely(usize <= SMALL_MAXCLASS)) {
arena_dalloc_promoted(tsdn, extent, ptr,
tcache, slow_path);
} else {
tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
usize, slow_path);
}
} else
huge_dalloc(tsdn, extent);
}
}
JEMALLOC_ALWAYS_INLINE void
@@ -1348,55 +1296,34 @@ arena_sdalloc(tsdn_t *tsdn, extent_t *extent, void *ptr, size_t size,
{
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
if (likely(extent_slab_get(extent))) {
arena_chunk_t *chunk = (arena_chunk_t *)extent_base_get(extent);
if (config_prof && opt_prof) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) !=
0);
if (arena_mapbits_large_get(chunk, pageind) != 0) {
/*
* Make sure to use promoted size, not request
* size.
*/
size = arena_mapbits_large_size_get(chunk,
pageind) - large_pad;
}
}
assert(s2u(size) == s2u(arena_salloc(tsdn, extent, ptr,
false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
binind, slow_path);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(tsdn,
extent_arena_get(extent), chunk, extent,
ptr, pageind);
}
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, binind,
slow_path);
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass) {
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
size, slow_path);
} else {
arena_dalloc_large(tsdn,
extent_arena_get(extent), chunk, extent,
ptr);
}
arena_chunk_t *chunk =
(arena_chunk_t *)extent_base_get(extent);
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_dalloc_small(tsdn, extent_arena_get(extent),
chunk, extent, ptr, pageind);
}
} else
huge_dalloc(tsdn, extent);
} else {
if (likely(tcache != NULL) && size <= tcache_maxclass) {
if (config_prof && unlikely(size <= SMALL_MAXCLASS)) {
arena_dalloc_promoted(tsdn, extent, ptr,
tcache, slow_path);
} else {
tcache_dalloc_huge(tsdn_tsd(tsdn), tcache, ptr,
size, slow_path);
}
} else
huge_dalloc(tsdn, extent);
}
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
@@ -61,7 +61,8 @@ bool chunk_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *extent, size_t offset, size_t length);
extent_t *chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t size_b);
chunk_hooks_t *chunk_hooks, extent_t *extent, size_t size_a, size_t usize_a,
size_t size_b, size_t usize_b);
bool chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks, extent_t *a, extent_t *b);
bool chunk_boot(void);

@@ -51,8 +51,7 @@ struct ctl_arena_stats_s {
uint64_t nrequests_small;
malloc_bin_stats_t bstats[NBINS];
malloc_large_stats_t *lstats; /* nlclasses elements. */
malloc_huge_stats_t *hstats; /* nhclasses elements. */
malloc_huge_stats_t hstats[NSIZES - NBINS];
};
struct ctl_stats_s {

@@ -15,9 +15,15 @@ struct extent_s {
/* Pointer to the extent that this structure is responsible for. */
void *e_addr;
/* Total region size. */
/* Extent size. */
size_t e_size;
/*
* Usable size, typically smaller than extent size due to large_pad or
* promotion of sampled small regions.
*/
size_t e_usize;
/* True if extent is active (in use). */
bool e_active;
@@ -106,6 +112,7 @@ void extent_arena_set(extent_t *extent, arena_t *arena);
void extent_addr_set(extent_t *extent, void *addr);
void extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment);
void extent_size_set(extent_t *extent, size_t size);
void extent_usize_set(extent_t *extent, size_t usize);
void extent_active_set(extent_t *extent, bool active);
void extent_dirty_set(extent_t *extent, bool dirty);
void extent_zeroed_set(extent_t *extent, bool zeroed);
@@ -113,8 +120,8 @@ void extent_committed_set(extent_t *extent, bool committed);
void extent_slab_set(extent_t *extent, bool slab);
void extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx);
void extent_init(extent_t *extent, arena_t *arena, void *addr,
size_t size, bool active, bool dirty, bool zeroed, bool committed,
bool slab);
size_t size, size_t usize, bool active, bool dirty, bool zeroed,
bool committed, bool slab);
void extent_dirty_insert(extent_t *extent,
arena_runs_dirty_link_t *runs_dirty, extent_t *chunks_dirty);
void extent_dirty_remove(extent_t *extent);
@@ -158,7 +165,7 @@ extent_usize_get(const extent_t *extent)
{
assert(!extent->e_slab);
return (extent->e_size - large_pad);
return (extent->e_usize);
}
JEMALLOC_INLINE void *
@@ -172,14 +179,15 @@ JEMALLOC_INLINE void *
extent_last_get(const extent_t *extent)
{
return ((void *)(uintptr_t)extent->e_addr + extent->e_size - PAGE);
return ((void *)(uintptr_t)extent->e_addr + extent_size_get(extent) -
PAGE);
}
JEMALLOC_INLINE void *
extent_past_get(const extent_t *extent)
{
return ((void *)(uintptr_t)extent->e_addr + extent->e_size);
return ((void *)(uintptr_t)extent->e_addr + extent_size_get(extent));
}
JEMALLOC_INLINE bool
@@ -258,9 +266,12 @@ extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment)
uint64_t r =
prng_lg_range(&extent_arena_get(extent)->offset_state,
lg_range, true);
uintptr_t random_offset = ((uintptr_t)r) << lg_range;
uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
lg_range);
extent->e_addr = (void *)((uintptr_t)extent->e_addr +
random_offset);
assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
extent->e_addr);
}
}
@@ -271,6 +282,13 @@ extent_size_set(extent_t *extent, size_t size)
extent->e_size = size;
}
JEMALLOC_INLINE void
extent_usize_set(extent_t *extent, size_t usize)
{
extent->e_usize = usize;
}
JEMALLOC_INLINE void
extent_active_set(extent_t *extent, bool active)
{
@@ -315,7 +333,8 @@ extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx)
JEMALLOC_INLINE void
extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
bool active, bool dirty, bool zeroed, bool committed, bool slab)
size_t usize, bool active, bool dirty, bool zeroed, bool committed,
bool slab)
{
assert(addr == PAGE_ADDR2BASE(addr) || !slab);
@@ -323,6 +342,7 @@ extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
extent_arena_set(extent, arena);
extent_addr_set(extent, addr);
extent_size_set(extent, size);
extent_usize_set(extent, usize);
extent_active_set(extent, active);
extent_dirty_set(extent, dirty);
extent_zeroed_set(extent, zeroed);
@@ -17,9 +17,12 @@ bool huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
void *huge_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#else
void huge_dalloc_junk(void *ptr, size_t usize);
#endif
void huge_dalloc_junked_locked(tsdn_t *tsdn, extent_t *extent);
void huge_dalloc(tsdn_t *tsdn, extent_t *extent);
size_t huge_salloc(tsdn_t *tsdn, const extent_t *extent);
prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);

@@ -797,33 +797,14 @@ sa2u(size_t size, size_t alignment)
return (usize);
}
/*
* We can't achieve subpage alignment, so round up alignment to the
* minimum that can actually be supported.
*/
alignment = PAGE_CEILING(alignment);
/* Try for a large size class. */
if (likely(size <= large_maxclass) && likely(alignment == PAGE)) {
/* Make sure result is a large size class. */
usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
/*
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
if (usize + large_pad + alignment <= arena_maxrun)
return (usize);
}
/* Huge size class. Beware of overflow. */
if (unlikely(alignment > HUGE_MAXCLASS))
return (0);
/* Make sure result is a huge size class. */
if (size <= chunksize)
usize = chunksize;
/* Make sure result is a large size class. */
if (size <= LARGE_MINCLASS)
usize = LARGE_MINCLASS;
else {
usize = s2u(size);
if (usize < size) {
@@ -836,7 +817,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the multi-page mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
if (usize + alignment < usize) {
if (usize + large_pad + PAGE_CEILING(alignment) < usize) {
/* size_t overflow. */
return (0);
}
@@ -960,8 +941,7 @@ iealloc(tsdn_t *tsdn, const void *ptr)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(tsdn_t *tsdn, const void *ptr);
size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr,
bool demote);
size_t isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr);
void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
@@ -971,7 +951,7 @@ void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t ivsalloc(tsdn_t *tsdn, const void *ptr);
void idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
bool is_metadata, bool slow_path);
void idalloc(tsd_t *tsd, extent_t *extent, void *ptr);
@@ -1003,17 +983,15 @@ iaalloc(tsdn_t *tsdn, const void *ptr)
* tsdn_t *tsdn = [...]
* void *ptr = [...]
* extent_t *extent = iealloc(tsdn, ptr);
* size_t sz = isalloc(tsdn, extent, ptr, config_prof);
* size_t sz = isalloc(tsdn, extent, ptr);
*/
JEMALLOC_ALWAYS_INLINE size_t
isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr, bool demote)
isalloc(tsdn_t *tsdn, const extent_t *extent, const void *ptr)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
return (arena_salloc(tsdn, extent, ptr, demote));
return (arena_salloc(tsdn, extent, ptr));
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1029,7 +1007,7 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
iealloc(tsdn, ret), ret, config_prof));
iealloc(tsdn, ret), ret));
}
return (ret);
}
@@ -1057,7 +1035,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
arena_metadata_allocated_add(iaalloc(tsdn, ret), isalloc(tsdn,
iealloc(tsdn, ret), ret, config_prof));
iealloc(tsdn, ret), ret));
}
return (ret);
}
@@ -1079,7 +1057,7 @@ ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
}
JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
ivsalloc(tsdn_t *tsdn, const void *ptr)
{
extent_t *extent;
@@ -1091,7 +1069,7 @@ ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
/* Only arena chunks should be looked up via interior pointers. */
assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
return (isalloc(tsdn, extent, ptr, demote));
return (isalloc(tsdn, extent, ptr));
}
JEMALLOC_ALWAYS_INLINE void
@@ -1104,7 +1082,7 @@ idalloctm(tsdn_t *tsdn, extent_t *extent, void *ptr, tcache_t *tcache,
assert(!is_metadata || iaalloc(tsdn, ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
arena_metadata_allocated_sub(iaalloc(tsdn, ptr), isalloc(tsdn,
extent, ptr, config_prof));
extent, ptr));
}
arena_dalloc(tsdn, extent, ptr, tcache, slow_path);
@@ -23,10 +23,8 @@ arena_cleanup
arena_dalloc
arena_dalloc_bin
arena_dalloc_bin_junked_locked
arena_dalloc_junk_large
arena_dalloc_junk_small
arena_dalloc_large
arena_dalloc_large_junked_locked
arena_dalloc_promoted
arena_dalloc_small
arena_decay_tick
arena_decay_ticks
@@ -45,7 +43,6 @@ arena_lg_dirty_mult_get
arena_lg_dirty_mult_set
arena_malloc
arena_malloc_hard
arena_malloc_large
arena_mapbits_allocated_get
arena_mapbits_binind_get
arena_mapbits_decommitted_get
@@ -92,7 +89,7 @@ arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
arena_prof_promoted
arena_prof_promote
arena_prof_tctx_get
arena_prof_tctx_reset
arena_prof_tctx_set
@@ -254,6 +251,7 @@ hash_x86_128
hash_x86_32
huge_dalloc
huge_dalloc_junk
huge_dalloc_junked_locked
huge_malloc
huge_palloc
huge_prof_tctx_get
@@ -287,7 +285,6 @@ ixalloc
jemalloc_postfork_child
jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
lg_prof_sample
malloc_cprintf
@@ -320,8 +317,6 @@ narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
nhclasses
nlclasses
nstime_add
nstime_compare
nstime_copy

@@ -489,7 +489,7 @@ prof_malloc(tsdn_t *tsdn, extent_t *extent, const void *ptr, size_t usize,
cassert(config_prof);
assert(ptr != NULL);
assert(usize == isalloc(tsdn, extent, ptr, true));
assert(usize == isalloc(tsdn, extent, ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(tsdn, extent, ptr, usize, tctx);
@@ -510,7 +510,7 @@ prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr, size_t usize,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, true));
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@@ -544,7 +544,7 @@ prof_free(tsd_t *tsd, const extent_t *extent, const void *ptr, size_t usize)
prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), extent, ptr);
cassert(config_prof);
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr, true));
assert(usize == isalloc(tsd_tsdn(tsd), extent, ptr));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
@@ -3,7 +3,6 @@
typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;
@@ -62,12 +61,10 @@ struct malloc_bin_stats_s {
size_t curruns;
};
struct malloc_large_stats_s {
struct malloc_huge_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena. Note that tcache may allocate an object, then recycle it
* many times, resulting many increments to nrequests, but only one
* each to nmalloc and ndalloc.
* the arena.
*/
uint64_t nmalloc;
uint64_t ndalloc;
@@ -79,21 +76,6 @@ struct malloc_large_stats_s {
*/
uint64_t nrequests;
/*
* Current number of runs of this size class, including runs currently
* cached by tcache.
*/
size_t curruns;
};
struct malloc_huge_stats_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
uint64_t nmalloc;
uint64_t ndalloc;
/* Current number of (multi-)chunk allocations of this size class. */
size_t curhchunks;
};
@@ -126,21 +108,13 @@ struct arena_stats_s {
size_t metadata_mapped;
size_t metadata_allocated; /* Protected via atomic_*_z(). */
/* Per-size-category statistics. */
size_t allocated_large;
uint64_t nmalloc_large;
uint64_t ndalloc_large;
uint64_t nrequests_large;
size_t allocated_huge;
uint64_t nmalloc_huge;
uint64_t ndalloc_huge;
/* One element for each large size class. */
malloc_large_stats_t *lstats;
uint64_t nrequests_huge;
/* One element for each huge size class. */
malloc_huge_stats_t *hstats;
malloc_huge_stats_t hstats[NSIZES - NBINS];
};
#endif /* JEMALLOC_H_STRUCTS */

@@ -30,8 +30,8 @@ typedef struct tcaches_s tcaches_t;
*/
#define TCACHE_NSLOTS_SMALL_MAX 200
/* Number of cache slots for large size classes. */
#define TCACHE_NSLOTS_LARGE 20
/* Number of cache slots for huge size classes. */
#define TCACHE_NSLOTS_HUGE 20
/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
#define LG_TCACHE_MAXCLASS_DEFAULT 15
@@ -113,7 +113,7 @@ extern tcache_bin_info_t *tcache_bin_info;
/*
* Number of tcache bins. There are NBINS small-object bins, plus 0 or more
* large-object bins.
* huge-object bins.
*/
extern unsigned nhbins;
@@ -136,7 +136,7 @@ void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
void tcache_bin_flush_huge(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
arena_t *oldarena, arena_t *newarena);
@@ -163,11 +163,11 @@ void tcache_enabled_set(bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void *tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
void tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif
@@ -336,7 +336,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
tcache_alloc_huge(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path)
{
void *ret;
@@ -349,14 +349,14 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
* Only allocate one large object at a time, because it's quite
* Only allocate one huge object at a time, because it's quite
* expensive to create one and not use it.
*/
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL))
return (NULL);
ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
ret = huge_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
if (ret == NULL)
return (NULL);
} else {
@@ -369,14 +369,6 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
assert(usize <= tcache_maxclass);
}
if (config_prof && usize == LARGE_MINCLASS) {
arena_chunk_t *chunk =(arena_chunk_t *)extent_addr_get(
iealloc(tsd_tsdn(tsd), ret));
size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >>
LG_PAGE);
arena_mapbits_large_binind_set(chunk, pageind,
BININD_INVALID);
}
if (likely(!zero)) {
if (slow_path && config_fill) {
if (unlikely(opt_junk_alloc)) {
@@ -424,26 +416,25 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
tcache_dalloc_huge(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
bool slow_path)
{
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
binind = size2index(size);
if (slow_path && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_large(ptr, size);
huge_dalloc_junk(ptr, size);
tbin = &tcache->tbins[binind];
tbin_info = &tcache_bin_info[binind];
if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
tcache_bin_flush_large(tsd, tbin, binind,
tcache_bin_flush_huge(tsd, tbin, binind,
(tbin_info->ncached_max >> 1), tcache);
}
assert(tbin->ncached < tbin_info->ncached_max);
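A second caller-visible piece of the change, summarized from the extent.h hunks above: an extent now records its usable size explicitly (e_usize), so extent_usize_get() returns a stored value rather than recomputing e_size - large_pad, and extent_init()/chunk_split_wrapper() take additional usize arguments. A small usage sketch of the new extent_init() signature; the variable values and surrounding context are illustrative assumptions, not code from the commit:

	extent_t extent;

	/* size is the mapped size; usize is the usable size it backs. */
	extent_init(&extent, arena, addr, size, usize, true, false, true,
	    true, false);
	assert(extent_size_get(&extent) == size);
	assert(extent_usize_get(&extent) == usize);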