Reduce the memory overhead of sampled small allocations
Previously, small allocations which were sampled as part of heap profiling were rounded up to `SC_LARGE_MINCLASS`. This additional memory usage becomes problematic when the page size is increased, as noted in #2358. Small allocations are now rounded up to the nearest multiple of `PAGE` instead, reducing the memory overhead by a factor of 4 in the most extreme cases.
Commit: 5a858c64d6
Parent: e1338703ef
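To see where the factor of 4 comes from, here is a minimal illustrative sketch (not jemalloc code). It assumes a typical configuration in which `PAGE` is 4 KiB and `SC_LARGE_MINCLASS` is `4 * PAGE`, and contrasts the old and new rounding of a sampled small allocation's usable size.

```c
#include <assert.h>
#include <stddef.h>

/* Assumed values for illustration only; both depend on the build configuration. */
#define PAGE              ((size_t)4096)        /* assumed 4 KiB page size        */
#define SC_LARGE_MINCLASS ((size_t)(4 * PAGE))  /* assumed smallest large class   */

/* Old behavior: every sampled small allocation was bumped to the
 * smallest large size class. */
static size_t
sampled_usize_old(size_t usize) {
	(void)usize;
	return SC_LARGE_MINCLASS;
}

/* New behavior (sketch): round up only to the next page boundary, which keeps
 * sampled allocations page-aligned (so they can still be recognized on the
 * deallocation path) while wasting at most one page. */
static size_t
sampled_usize_new(size_t usize) {
	return (usize + PAGE - 1) & ~(PAGE - 1);
}

int
main(void) {
	/* A 16-byte sampled allocation: 16 KiB before, 4 KiB after. */
	assert(sampled_usize_old(16) == 4 * PAGE);
	assert(sampled_usize_new(16) == PAGE);
	return 0;
}
```

Under these assumptions the per-sample overhead drops from four pages to one, and the absolute savings grow with the configured page size, which is the scenario raised in #2358.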
@@ -65,10 +65,11 @@ void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
     const unsigned nfill);
 
 void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
-    szind_t ind, bool zero);
+    szind_t ind, bool zero, bool slab);
 void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
+    size_t alignment, bool zero, bool slab, tcache_t *tcache);
+void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize,
+    size_t bumped_usize);
 void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
     bool slow_path);
 void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
@@ -81,7 +82,7 @@ void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
 bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero, size_t *newsize);
 void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t alignment, bool zero, tcache_t *tcache,
+    size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache,
     hook_ralloc_args_t *hook_args);
 dss_prec_t arena_dss_prec_get(arena_t *arena);
 ehooks_t *arena_get_ehooks(arena_t *arena);
@@ -182,23 +182,22 @@ arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
 
 JEMALLOC_ALWAYS_INLINE void *
 arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
-    tcache_t *tcache, bool slow_path) {
+    bool slab, tcache_t *tcache, bool slow_path) {
     assert(!tsdn_null(tsdn) || tcache == NULL);
 
     if (likely(tcache != NULL)) {
-        if (likely(size <= SC_SMALL_MAXCLASS)) {
+        if (likely(slab)) {
+            assert(sz_can_use_slab(size));
             return tcache_alloc_small(tsdn_tsd(tsdn), arena,
                 tcache, size, ind, zero, slow_path);
-        }
-        if (likely(size <= tcache_maxclass)) {
+        } else if (likely(size <= tcache_maxclass)) {
             return tcache_alloc_large(tsdn_tsd(tsdn), arena,
                 tcache, size, ind, zero, slow_path);
         }
         /* (size > tcache_maxclass) case falls through. */
-        assert(size > tcache_maxclass);
     }
 
-    return arena_malloc_hard(tsdn, arena, size, ind, zero);
+    return arena_malloc_hard(tsdn, arena, size, ind, zero, slab);
 }
 
 JEMALLOC_ALWAYS_INLINE arena_t *
@@ -52,10 +52,12 @@ isalloc(tsdn_t *tsdn, const void *ptr) {
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
-    bool is_internal, arena_t *arena, bool slow_path) {
+iallocztm_explicit_slab(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
+    bool slab, tcache_t *tcache, bool is_internal, arena_t *arena,
+    bool slow_path) {
     void *ret;
 
+    assert(!slab || sz_can_use_slab(size)); /* slab && large is illegal */
     assert(!is_internal || tcache == NULL);
     assert(!is_internal || arena == NULL || arena_is_auto(arena));
     if (!tsdn_null(tsdn) && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) == 0) {
@@ -63,13 +65,21 @@ iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
         WITNESS_RANK_CORE, 0);
     }
 
-    ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
+    ret = arena_malloc(tsdn, arena, size, ind, zero, slab, tcache, slow_path);
     if (config_stats && is_internal && likely(ret != NULL)) {
         arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
     }
     return ret;
 }
 
+JEMALLOC_ALWAYS_INLINE void *
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+    bool is_internal, arena_t *arena, bool slow_path) {
+    bool slab = sz_can_use_slab(size);
+    return iallocztm_explicit_slab(tsdn, size, ind, zero, slab, tcache,
+        is_internal, arena, slow_path);
+}
+
 JEMALLOC_ALWAYS_INLINE void *
 ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
     return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false,
@@ -77,10 +87,11 @@ ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) {
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
-    tcache_t *tcache, bool is_internal, arena_t *arena) {
+ipallocztm_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+    bool slab, tcache_t *tcache, bool is_internal, arena_t *arena) {
     void *ret;
 
+    assert(!slab || sz_can_use_slab(usize)); /* slab && large is illegal */
     assert(usize != 0);
     assert(usize == sz_sa2u(usize, alignment));
     assert(!is_internal || tcache == NULL);
@@ -88,7 +99,7 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
 
-    ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
+    ret = arena_palloc(tsdn, arena, usize, alignment, zero, slab, tcache);
     assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
     if (config_stats && is_internal && likely(ret != NULL)) {
         arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret));
@@ -96,12 +107,26 @@ ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     return ret;
 }
 
+JEMALLOC_ALWAYS_INLINE void *
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+    tcache_t *tcache, bool is_internal, arena_t *arena) {
+    return ipallocztm_explicit_slab(tsdn, usize, alignment, zero,
+        sz_can_use_slab(usize), tcache, is_internal, arena);
+}
+
 JEMALLOC_ALWAYS_INLINE void *
 ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
     tcache_t *tcache, arena_t *arena) {
     return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena);
 }
 
+JEMALLOC_ALWAYS_INLINE void *
+ipalloct_explicit_slab(tsdn_t *tsdn, size_t usize, size_t alignment,
+    bool zero, bool slab, tcache_t *tcache, arena_t *arena) {
+    return ipallocztm_explicit_slab(tsdn, usize, alignment, zero, slab,
+        tcache, false, arena);
+}
+
 JEMALLOC_ALWAYS_INLINE void *
 ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) {
     return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
@@ -146,7 +171,7 @@ isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
 
 JEMALLOC_ALWAYS_INLINE void *
 iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
-    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+    size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena,
     hook_ralloc_args_t *hook_args) {
     witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
         WITNESS_RANK_CORE, 0);
@@ -157,7 +182,8 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
     if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
         return NULL;
     }
-    p = ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+    p = ipalloct_explicit_slab(tsdn, usize, alignment, zero, slab,
+        tcache, arena);
     if (p == NULL) {
         return NULL;
     }
@@ -184,8 +210,9 @@ iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
  * passed-around anywhere.
  */
 JEMALLOC_ALWAYS_INLINE void *
-iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
-    bool zero, tcache_t *tcache, arena_t *arena, hook_ralloc_args_t *hook_args)
+iralloct_explicit_slab(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+    size_t alignment, bool zero, bool slab, tcache_t *tcache, arena_t *arena,
+    hook_ralloc_args_t *hook_args)
 {
     assert(ptr != NULL);
     assert(size != 0);
@@ -199,18 +226,28 @@ iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
          * and copy.
          */
         return iralloct_realign(tsdn, ptr, oldsize, size, alignment,
-            zero, tcache, arena, hook_args);
+            zero, slab, tcache, arena, hook_args);
     }
 
     return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero,
-        tcache, hook_args);
+        slab, tcache, hook_args);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment,
+    size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
+    hook_ralloc_args_t *hook_args)
+{
+    bool slab = sz_can_use_slab(usize);
+    return iralloct_explicit_slab(tsdn, ptr, oldsize, size, alignment, zero,
+        slab, tcache, arena, hook_args);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
 iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
-    bool zero, hook_ralloc_args_t *hook_args) {
-    return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero,
-        tcache_get(tsd), NULL, hook_args);
+    size_t usize, bool zero, hook_ralloc_args_t *hook_args) {
+    return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, usize,
+        zero, tcache_get(tsd), NULL, hook_args);
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -405,7 +442,7 @@ maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
 
 JEMALLOC_ALWAYS_INLINE bool
 prof_sample_aligned(const void *ptr) {
-    return ((uintptr_t)ptr & PAGE_MASK) == 0;
+    return ((uintptr_t)ptr & PROF_SAMPLE_ALIGNMENT_MASK) == 0;
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -1,6 +1,9 @@
 #ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H
 #define JEMALLOC_INTERNAL_PAGES_EXTERNS_H
 
+/* Actual operating system page size, detected during bootstrap, <= PAGE. */
+extern size_t os_page;
+
 /* Page size. LG_PAGE is determined by the configure script. */
 #ifdef PAGE_MASK
 # undef PAGE_MASK
@@ -239,14 +239,15 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
 }
 
 JEMALLOC_ALWAYS_INLINE size_t
-prof_sample_align(size_t orig_align) {
+prof_sample_align(size_t usize, size_t orig_align) {
     /*
-     * Enforce page alignment, so that sampled allocations can be identified
+     * Enforce alignment, so that sampled allocations can be identified
      * w/o metadata lookup.
      */
     assert(opt_prof);
-    return (opt_cache_oblivious && orig_align < PAGE) ? PAGE :
-        orig_align;
+    return (orig_align < PROF_SAMPLE_ALIGNMENT &&
+        (sz_can_use_slab(usize) || opt_cache_oblivious)) ?
+        PROF_SAMPLE_ALIGNMENT : orig_align;
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -80,4 +80,12 @@ typedef struct prof_recent_s prof_recent_t;
 /* Thread name storage size limit. */
 #define PROF_THREAD_NAME_MAX_LEN 16
 
+/*
+ * Minimum required alignment for sampled allocations. Over-aligning sampled
+ * allocations allows us to quickly identify them on the dalloc path without
+ * resorting to metadata lookup.
+ */
+#define PROF_SAMPLE_ALIGNMENT PAGE
+#define PROF_SAMPLE_ALIGNMENT_MASK PAGE_MASK
+
 #endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
@@ -3,6 +3,8 @@
 
 #define SAFETY_CHECK_DOUBLE_FREE_MAX_SCAN_DEFAULT 32
 
+#include "jemalloc/internal/pages.h"
+
 void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
     size_t true_size, size_t input_size);
 void safety_check_fail(const char *format, ...);
@@ -12,22 +14,50 @@ typedef void (*safety_check_abort_hook_t)(const char *message);
 /* Can set to NULL for a default. */
 void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
 
+#define REDZONE_SIZE ((size_t) 32)
+#define REDZONE_FILL_VALUE 0xBC
+
+/*
+ * Normally the redzone extends `REDZONE_SIZE` bytes beyond the end of
+ * the allocation. However, we don't let the redzone extend onto another
+ * OS page because this would impose additional overhead if that page was
+ * not already resident in memory.
+ */
+JEMALLOC_ALWAYS_INLINE const unsigned char *
+compute_redzone_end(const void *_ptr, size_t usize, size_t bumped_usize) {
+    const unsigned char *ptr = (const unsigned char *) _ptr;
+    const unsigned char *redzone_end = usize + REDZONE_SIZE < bumped_usize ?
+        &ptr[usize + REDZONE_SIZE] : &ptr[bumped_usize];
+    const unsigned char *page_end = (const unsigned char *)
+        ALIGNMENT_CEILING(((uintptr_t) (&ptr[usize])), os_page);
+    return redzone_end < page_end ? redzone_end : page_end;
+}
+
 JEMALLOC_ALWAYS_INLINE void
 safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
-    assert(usize < bumped_usize);
-    for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
-        *((unsigned char *)ptr + i) = 0xBC;
+    assert(usize <= bumped_usize);
+    const unsigned char *redzone_end =
+        compute_redzone_end(ptr, usize, bumped_usize);
+    for (unsigned char *curr = &((unsigned char *)ptr)[usize];
+        curr < redzone_end; curr++) {
+        *curr = REDZONE_FILL_VALUE;
     }
 }
 
 JEMALLOC_ALWAYS_INLINE void
 safety_check_verify_redzone(const void *ptr, size_t usize, size_t bumped_usize)
 {
-    for (size_t i = usize; i < bumped_usize && i < usize + 32; ++i) {
-        if (unlikely(*((unsigned char *)ptr + i) != 0xBC)) {
+    const unsigned char *redzone_end =
+        compute_redzone_end(ptr, usize, bumped_usize);
+    for (const unsigned char *curr = &((const unsigned char *)ptr)[usize];
+        curr < redzone_end; curr++) {
+        if (unlikely(*curr != REDZONE_FILL_VALUE)) {
             safety_check_fail("Use after free error\n");
         }
     }
 }
 
+#undef REDZONE_SIZE
+#undef REDZONE_FILL_VALUE
+
 #endif /*JEMALLOC_INTERNAL_SAFETY_CHECK_H */
@@ -365,6 +365,21 @@ sz_sa2u(size_t size, size_t alignment) {
     return usize;
 }
 
+/*
+ * Under normal circumstances, whether or not to use a slab
+ * to satisfy an allocation depends solely on the allocation's
+ * effective size. However, this is *not* the case when an allocation
+ * is sampled for profiling, in which case you *must not* use a slab
+ * regardless of the effective size. Thus `sz_can_use_slab` is called
+ * on the common path, but there exist `*_explicit_slab` variants of
+ * several functions for handling the aforementioned case of
+ * sampled allocations.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+sz_can_use_slab(size_t size) {
+    return size <= SC_SMALL_MAXCLASS;
+}
+
 size_t sz_psz_quantize_floor(size_t size);
 size_t sz_psz_quantize_ceil(size_t size);
@@ -60,7 +60,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     if (unlikely(tcache_small_bin_disabled(binind, bin))) {
         /* stats and zero are handled directly by the arena. */
         return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
-            binind, zero);
+            binind, zero, /* slab */ true);
     }
     tcache_bin_flush_stashed(tsd, tcache, bin, binind,
         /* is_small */ true);

src/arena.c (64 lines changed)
@@ -1191,7 +1191,7 @@ arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
 
 void *
 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
-    bool zero) {
+    bool zero, bool slab) {
     assert(!tsdn_null(tsdn) || arena != NULL);
 
     if (likely(!tsdn_null(tsdn))) {
@@ -1201,18 +1201,19 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
         return NULL;
     }
 
-    if (likely(size <= SC_SMALL_MAXCLASS)) {
+    if (likely(slab)) {
+        assert(sz_can_use_slab(size));
         return arena_malloc_small(tsdn, arena, ind, zero);
-    }
-    return large_malloc(tsdn, arena, sz_index2size(ind), zero);
+    } else {
+        return large_malloc(tsdn, arena, sz_index2size(ind), zero);
+    }
 }
 
 void *
 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
-    bool zero, tcache_t *tcache) {
-    void *ret;
-
-    if (usize <= SC_SMALL_MAXCLASS) {
+    bool zero, bool slab, tcache_t *tcache) {
+    if (slab) {
+        assert(sz_can_use_slab(usize));
         /* Small; alignment doesn't require special slab placement. */
 
         /* usize should be a result of sz_sa2u() */
@@ -1223,27 +1224,26 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
          */
         assert(alignment <= PAGE);
 
-        ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
-            zero, tcache, true);
+        return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+            zero, slab, tcache, true);
     } else {
         if (likely(alignment <= CACHELINE)) {
-            ret = large_malloc(tsdn, arena, usize, zero);
+            return large_malloc(tsdn, arena, usize, zero);
         } else {
-            ret = large_palloc(tsdn, arena, usize, alignment, zero);
+            return large_palloc(tsdn, arena, usize, alignment, zero);
         }
     }
-    return ret;
 }
 
 void
-arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
+arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize, size_t bumped_usize) {
     cassert(config_prof);
     assert(ptr != NULL);
-    assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
-    assert(usize <= SC_SMALL_MAXCLASS);
+    assert(isalloc(tsdn, ptr) == bumped_usize);
+    assert(sz_can_use_slab(usize));
 
     if (config_opt_safety_checks) {
-        safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
+        safety_check_set_redzone(ptr, usize, bumped_usize);
     }
 
     edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
@@ -1259,13 +1259,19 @@ static size_t
 arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
     cassert(config_prof);
     assert(ptr != NULL);
+    size_t usize = isalloc(tsdn, ptr);
+    size_t bumped_usize = sz_sa2u(usize, PROF_SAMPLE_ALIGNMENT);
+    assert(bumped_usize <= SC_LARGE_MINCLASS &&
+        PAGE_CEILING(bumped_usize) == bumped_usize);
+    assert(edata_size_get(edata) - bumped_usize <= sz_large_pad);
+    szind_t szind = sz_size2index(bumped_usize);
 
-    edata_szind_set(edata, SC_NBINS);
-    emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
+    edata_szind_set(edata, szind);
+    emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
 
-    assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
+    assert(isalloc(tsdn, ptr) == bumped_usize);
 
-    return SC_LARGE_MINCLASS;
+    return bumped_usize;
 }
 
 void
@@ -1282,10 +1288,10 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
              * Currently, we only do redzoning for small sampled
              * allocations.
              */
-            assert(bumped_usize == SC_LARGE_MINCLASS);
             safety_check_verify_redzone(ptr, usize, bumped_usize);
         }
-        if (bumped_usize <= tcache_maxclass && tcache != NULL) {
+        if (bumped_usize >= SC_LARGE_MINCLASS &&
+            bumped_usize <= tcache_maxclass && tcache != NULL) {
             tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
                 sz_size2index(bumped_usize), slow_path);
         } else {
@@ -1443,28 +1449,30 @@ done:
 
 static void *
 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
-    size_t alignment, bool zero, tcache_t *tcache) {
+    size_t alignment, bool zero, bool slab, tcache_t *tcache) {
     if (alignment == 0) {
         return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
-            zero, tcache, true);
+            zero, slab, tcache, true);
     }
     usize = sz_sa2u(usize, alignment);
     if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) {
         return NULL;
     }
-    return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
+    return ipalloct_explicit_slab(tsdn, usize, alignment, zero, slab,
+        tcache, arena);
 }
 
 void *
 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t alignment, bool zero, tcache_t *tcache,
+    size_t size, size_t alignment, bool zero, bool slab, tcache_t *tcache,
     hook_ralloc_args_t *hook_args) {
     size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
     if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
         return NULL;
     }
 
-    if (likely(usize <= SC_SMALL_MAXCLASS)) {
+    if (likely(slab)) {
+        assert(sz_can_use_slab(usize));
         /* Try to avoid moving the allocation. */
         UNUSED size_t newsize;
         if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero,
@@ -1488,7 +1496,7 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
      * object. In that case, fall back to allocating new space and copying.
      */
     void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
-        zero, tcache);
+        zero, slab, tcache);
     if (ret == NULL) {
         return NULL;
     }
@@ -2360,7 +2360,7 @@ arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) {
 /* ind is ignored if dopts->alignment > 0. */
 JEMALLOC_ALWAYS_INLINE void *
 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
-    size_t size, size_t usize, szind_t ind) {
+    size_t size, size_t usize, szind_t ind, bool slab) {
     /* Fill in the tcache. */
     tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind,
         sopts->slow, /* is_alloc */ true);
@@ -2372,12 +2372,12 @@ imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     }
 
     if (unlikely(dopts->alignment != 0)) {
-        return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
-            dopts->zero, tcache, arena);
+        return ipalloct_explicit_slab(tsd_tsdn(tsd), usize,
+            dopts->alignment, dopts->zero, slab, tcache, arena);
     }
 
-    return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
-        arena, sopts->slow);
+    return iallocztm_explicit_slab(tsd_tsdn(tsd), size, ind, dopts->zero,
+        slab, tcache, false, arena, sopts->slow);
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -2385,28 +2385,26 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
     size_t usize, szind_t ind) {
     void *ret;
 
+    dopts->alignment = prof_sample_align(usize, dopts->alignment);
     /*
-     * For small allocations, sampling bumps the usize. If so, we allocate
-     * from the ind_large bucket.
+     * If the allocation is small enough that it would normally be allocated
+     * on a slab, we need to take additional steps to ensure that it gets
+     * its own extent instead.
      */
-    szind_t ind_large;
-    dopts->alignment = prof_sample_align(dopts->alignment);
-    if (usize <= SC_SMALL_MAXCLASS) {
-        assert(((dopts->alignment == 0) ?
-            sz_s2u(SC_LARGE_MINCLASS) :
-            sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
-            == SC_LARGE_MINCLASS);
-        ind_large = sz_size2index(SC_LARGE_MINCLASS);
-        size_t bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
+    if (sz_can_use_slab(usize)) {
+        assert((dopts->alignment & PROF_SAMPLE_ALIGNMENT_MASK) == 0);
+        size_t bumped_usize = sz_sa2u(usize, dopts->alignment);
+        szind_t bumped_ind = sz_size2index(bumped_usize);
+        dopts->tcache_ind = TCACHE_IND_NONE;
         ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
-            bumped_usize, ind_large);
+            bumped_usize, bumped_ind, /* slab */ false);
         if (unlikely(ret == NULL)) {
             return NULL;
         }
-        arena_prof_promote(tsd_tsdn(tsd), ret, usize);
+        arena_prof_promote(tsd_tsdn(tsd), ret, usize, bumped_usize);
     } else {
-        ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
+        ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind,
+            /* slab */ false);
     }
     assert(prof_sample_aligned(ret));
 
@@ -2532,9 +2530,10 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
 
         emap_alloc_ctx_t alloc_ctx;
         if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
-            alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
+            alloc_ctx.slab = sz_can_use_slab(usize);
             allocation = imalloc_no_sample(
-                sopts, dopts, tsd, usize, usize, ind);
+                sopts, dopts, tsd, usize, usize, ind,
+                alloc_ctx.slab);
         } else if ((uintptr_t)tctx > (uintptr_t)1U) {
             allocation = imalloc_sample(
                 sopts, dopts, tsd, usize, ind);
@@ -2551,7 +2550,7 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
     } else {
         assert(!opt_prof);
         allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
-            ind);
+            ind, sz_can_use_slab(usize));
         if (unlikely(allocation == NULL)) {
             goto label_oom;
         }
@@ -3314,18 +3313,25 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
         return NULL;
     }
 
-    alignment = prof_sample_align(alignment);
-    if (usize <= SC_SMALL_MAXCLASS) {
-        p = iralloct(tsdn, old_ptr, old_usize,
-            SC_LARGE_MINCLASS, alignment, zero, tcache,
-            arena, hook_args);
+    alignment = prof_sample_align(usize, alignment);
+    /*
+     * If the allocation is small enough that it would normally be allocated
+     * on a slab, we need to take additional steps to ensure that it gets
+     * its own extent instead.
+     */
+    if (sz_can_use_slab(usize)) {
+        size_t bumped_usize = sz_sa2u(usize, alignment);
+        p = iralloct_explicit_slab(tsdn, old_ptr, old_usize,
+            bumped_usize, alignment, zero, /* slab */ false,
+            tcache, arena, hook_args);
         if (p == NULL) {
             return NULL;
         }
-        arena_prof_promote(tsdn, p, usize);
+        arena_prof_promote(tsdn, p, usize, bumped_usize);
     } else {
-        p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
-            tcache, arena, hook_args);
+        p = iralloct_explicit_slab(tsdn, old_ptr, old_usize, usize,
+            alignment, zero, /* slab */ false, tcache, arena,
+            hook_args);
     }
     assert(prof_sample_aligned(p));
 
@@ -3348,7 +3354,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
             usize, alignment, zero, tcache, arena, tctx, hook_args);
     } else {
         p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
-            zero, tcache, arena, hook_args);
+            usize, zero, tcache, arena, hook_args);
     }
     if (unlikely(p == NULL)) {
         prof_alloc_rollback(tsd, tctx);
@@ -3407,7 +3413,7 @@ do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
         }
     } else {
        p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
-            zero, tcache, arena, &hook_args);
+            usize, zero, tcache, arena, &hook_args);
         if (unlikely(p == NULL)) {
             goto label_oom;
         }
@@ -33,7 +33,7 @@
 /* Data. */
 
 /* Actual operating system page size, detected during bootstrap, <= PAGE. */
-static size_t os_page;
+size_t os_page;
 
 #ifndef _WIN32
 # define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
|
Loading…
Reference in New Issue
Block a user