Compare commits

No commits in common: "e4817c8d89a2a413e835c4adeab5c5c4412f9235" and "6b197fdd460be8bf3379da91d42e677dd5b5437a" have entirely different histories.

e4817c8d89 ... 6b197fdd46
@@ -155,7 +155,6 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/thread_event.c \
$(srcroot)src/ticker.c \
$(srcroot)src/tsd.c \
$(srcroot)src/util.c \
$(srcroot)src/witness.c
ifeq ($(enable_zone_allocator), 1)
C_SRCS += $(srcroot)src/zone.c
@@ -242,7 +241,6 @@ TESTS_UNIT := \
$(srcroot)test/unit/mq.c \
$(srcroot)test/unit/mtx.c \
$(srcroot)test/unit/nstime.c \
$(srcroot)test/unit/ncached_max.c \
$(srcroot)test/unit/oversize_threshold.c \
$(srcroot)test/unit/pa.c \
$(srcroot)test/unit/pack.c \
@@ -63,7 +63,8 @@ void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
cache_bin_t *cache_bin, szind_t binind, const unsigned nfill);
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
const unsigned nfill);

void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero, bool slab);
@@ -198,8 +198,7 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
assert(sz_can_use_slab(size));
return tcache_alloc_small(tsdn_tsd(tsdn), arena,
tcache, size, ind, zero, slow_path);
} else if (likely(
ind < tcache_nbins_get(tcache->tcache_slow) &&
} else if (likely(ind < TCACHE_NBINS_MAX &&
!tcache_bin_disabled(ind, &tcache->bins[ind],
tcache->tcache_slow))) {
return tcache_alloc_large(tsdn_tsd(tsdn), arena,
@ -301,24 +300,23 @@ JEMALLOC_ALWAYS_INLINE void
|
||||
arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
|
||||
bool slow_path) {
|
||||
assert (!tsdn_null(tsdn) && tcache != NULL);
|
||||
bool is_sample_promoted = config_prof && szind < SC_NBINS;
|
||||
if (unlikely(is_sample_promoted)) {
|
||||
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
|
||||
} else {
|
||||
if (szind < tcache_nbins_get(tcache->tcache_slow) &&
|
||||
!tcache_bin_disabled(szind, &tcache->bins[szind],
|
||||
tcache->tcache_slow)) {
|
||||
if (szind < TCACHE_NBINS_MAX &&
|
||||
!tcache_bin_disabled(szind, &tcache->bins[szind],
|
||||
tcache->tcache_slow)) {
|
||||
if (config_prof && unlikely(szind < SC_NBINS)) {
|
||||
arena_dalloc_promoted(tsdn, ptr, tcache, slow_path);
|
||||
} else {
|
||||
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, szind,
|
||||
slow_path);
|
||||
} else {
|
||||
edata_t *edata = emap_edata_lookup(tsdn,
|
||||
&arena_emap_global, ptr);
|
||||
if (large_dalloc_safety_checks(edata, ptr, szind)) {
|
||||
/* See the comment in isfree. */
|
||||
return;
|
||||
}
|
||||
large_dalloc(tsdn, edata);
|
||||
}
|
||||
} else {
|
||||
edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
|
||||
ptr);
|
||||
if (large_dalloc_safety_checks(edata, ptr, szind)) {
|
||||
/* See the comment in isfree. */
|
||||
return;
|
||||
}
|
||||
large_dalloc(tsdn, edata);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -98,13 +98,10 @@ struct arena_s {
/*
* The arena is allocated alongside its bins; really this is a
* dynamically sized array determined by the binshard settings.
* Enforcing cacheline-alignment to minimize the number of cachelines
* touched on the hot paths.
*/
JEMALLOC_WARN_ON_USAGE("Do not use this field directly. "
"Use `arena_get_bin` instead.")
JEMALLOC_ALIGNED(CACHELINE)
bin_t all_bins[0];
bin_t all_bins[0];
};

#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
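The trailing all_bins[0] member above is the usual C trailing-array idiom: the arena header and all of its bins come from one allocation whose size depends on how many bins the binshard settings require, which is why the arena.c hunk further down sizes the allocation as sizeof(arena_t) plus sizeof(bin_t) times nbins_total before calling base_alloc. A minimal sketch of that sizing pattern; the toy types and the calloc-based allocator are illustrative stand-ins, not jemalloc's real layout:

#include <stdlib.h>

/* Toy stand-ins for bin_t / arena_t, illustration only. */
typedef struct { void *slabcur; } toy_bin_t;
typedef struct {
	unsigned nbins_total;     /* e.g. bins per size class times binshards */
	toy_bin_t all_bins[];     /* dynamically sized tail, as in arena_s */
} toy_arena_t;

/* One allocation carries the arena header plus all of its bins. */
static toy_arena_t *
toy_arena_new(unsigned nbins_total) {
	size_t sz = sizeof(toy_arena_t) + sizeof(toy_bin_t) * nbins_total;
	toy_arena_t *a = calloc(1, sz);
	if (a != NULL) {
		a->nbins_total = nbins_total; /* &a->all_bins[i] valid for i < nbins_total */
	}
	return a;
}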
|
||||
|
@ -202,17 +202,12 @@ cache_bin_disabled(cache_bin_t *bin) {
|
||||
return disabled;
|
||||
}
|
||||
|
||||
/* Gets ncached_max without asserting that the bin is enabled. */
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_ncached_max_get_unsafe(cache_bin_t *bin) {
|
||||
return bin->bin_info.ncached_max;
|
||||
}
|
||||
|
||||
/* Returns ncached_max: Upper limit on ncached. */
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_ncached_max_get(cache_bin_t *bin) {
|
||||
cache_bin_info_ncached_max_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
assert(!cache_bin_disabled(bin));
|
||||
return cache_bin_ncached_max_get_unsafe(bin);
|
||||
assert(info == &bin->bin_info);
|
||||
return info->ncached_max;
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -234,7 +229,7 @@ cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
* Does difference calculations that handle wraparound correctly. Earlier must
* be associated with the position earlier in memory.
*/
static inline cache_bin_sz_t
static inline uint16_t
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
cache_bin_assert_earlier(bin, earlier, later);
return later - earlier;
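Because the two arguments are only the low 16 bits of stack addresses, the plain unsigned subtraction above gives the correct byte distance even when the backing array straddles a 64 KiB boundary. A worked example with made-up values:

uint16_t earlier = 0xfff0;            /* low 16 bits of the lower address  */
uint16_t later   = 0x0010;            /* low 16 bits of the higher address */
uint16_t diff    = later - earlier;   /* wraps to 0x0020: 32 bytes, i.e. 4 pointers */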
|
||||
@ -267,9 +262,9 @@ cache_bin_ncached_get_internal(cache_bin_t *bin) {
|
||||
* possible.
|
||||
*/
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_ncached_get_local(cache_bin_t *bin) {
|
||||
cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t n = cache_bin_ncached_get_internal(bin);
|
||||
assert(n <= cache_bin_ncached_max_get(bin));
|
||||
assert(n <= cache_bin_info_ncached_max_get(bin, info));
|
||||
return n;
|
||||
}
|
||||
|
||||
@ -304,9 +299,9 @@ cache_bin_empty_position_get(cache_bin_t *bin) {
|
||||
* arena statistics collection.
|
||||
*/
|
||||
static inline uint16_t
|
||||
cache_bin_low_bits_low_bound_get(cache_bin_t *bin) {
|
||||
cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
return (uint16_t)bin->low_bits_empty -
|
||||
cache_bin_ncached_max_get(bin) * sizeof(void *);
|
||||
cache_bin_info_ncached_max_get(bin, info) * sizeof(void *);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -315,8 +310,8 @@ cache_bin_low_bits_low_bound_get(cache_bin_t *bin) {
|
||||
* A pointer to the position with the lowest address of the backing array.
|
||||
*/
|
||||
static inline void **
|
||||
cache_bin_low_bound_get(cache_bin_t *bin) {
|
||||
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin);
|
||||
cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
|
||||
void **ret = cache_bin_empty_position_get(bin) - ncached_max;
|
||||
assert(ret <= bin->stack_head);
|
||||
|
||||
@ -328,8 +323,8 @@ cache_bin_low_bound_get(cache_bin_t *bin) {
|
||||
* batch fill a nonempty cache bin.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_assert_empty(cache_bin_t *bin) {
|
||||
assert(cache_bin_ncached_get_local(bin) == 0);
|
||||
cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
assert(cache_bin_ncached_get_local(bin, info) == 0);
|
||||
assert(cache_bin_empty_position_get(bin) == bin->stack_head);
|
||||
}
|
||||
|
||||
@ -346,10 +341,10 @@ cache_bin_low_water_get_internal(cache_bin_t *bin) {
|
||||
|
||||
/* Returns the numeric value of low water in [0, ncached]. */
|
||||
static inline cache_bin_sz_t
|
||||
cache_bin_low_water_get(cache_bin_t *bin) {
|
||||
cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
|
||||
assert(low_water <= cache_bin_ncached_max_get(bin));
|
||||
assert(low_water <= cache_bin_ncached_get_local(bin));
|
||||
assert(low_water <= cache_bin_info_ncached_max_get(bin, info));
|
||||
assert(low_water <= cache_bin_ncached_get_local(bin, info));
|
||||
|
||||
cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
|
||||
bin->low_bits_low_water);
|
||||
@ -530,16 +525,17 @@ cache_bin_stash(cache_bin_t *bin, void *ptr) {
|
||||
|
||||
/* Get the number of stashed pointers. */
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
|
||||
cache_bin_nstashed_get_internal(cache_bin_t *bin) {
|
||||
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get(bin);
|
||||
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin);
|
||||
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
|
||||
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
|
||||
info);
|
||||
|
||||
cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
|
||||
bin->low_bits_full) / sizeof(void *);
|
||||
assert(n <= ncached_max);
|
||||
if (config_debug && n != 0) {
|
||||
/* Below are for assertions only. */
|
||||
void **low_bound = cache_bin_low_bound_get(bin);
|
||||
void **low_bound = cache_bin_low_bound_get(bin, info);
|
||||
|
||||
assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
|
||||
void *stashed = *(low_bound + n - 1);
|
||||
@ -555,9 +551,9 @@ cache_bin_nstashed_get_internal(cache_bin_t *bin) {
|
||||
}
|
||||
|
||||
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
|
||||
cache_bin_nstashed_get_local(cache_bin_t *bin) {
|
||||
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin);
|
||||
assert(n <= cache_bin_ncached_max_get(bin));
|
||||
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info);
|
||||
assert(n <= cache_bin_info_ncached_max_get(bin, info));
|
||||
return n;
|
||||
}
|
||||
|
||||
@ -578,26 +574,29 @@ cache_bin_nstashed_get_local(cache_bin_t *bin) {
|
||||
* This function should not call other utility functions because the racy
|
||||
* condition may cause unexpected / undefined behaviors in unverified utility
|
||||
* functions. Currently, this function calls two utility functions
|
||||
* cache_bin_ncached_max_get and cache_bin_low_bits_low_bound_get because
|
||||
* cache_bin_info_ncached_max_get and cache_bin_low_bits_low_bound_get because
|
||||
* they help access values that will not be concurrently modified.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_sz_t *ncached,
|
||||
cache_bin_sz_t *nstashed) {
|
||||
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
|
||||
/* Racy version of cache_bin_ncached_get_internal. */
|
||||
cache_bin_sz_t diff = bin->low_bits_empty -
|
||||
(uint16_t)(uintptr_t)bin->stack_head;
|
||||
cache_bin_sz_t n = diff / sizeof(void *);
|
||||
|
||||
cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
|
||||
assert(n <= ncached_max);
|
||||
*ncached = n;
|
||||
|
||||
/* Racy version of cache_bin_nstashed_get_internal. */
|
||||
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin);
|
||||
uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
|
||||
info);
|
||||
n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *);
|
||||
|
||||
assert(n <= ncached_max);
|
||||
*nstashed = n;
|
||||
/*
|
||||
* Note that cannot assert anything regarding ncached_max because
|
||||
* it can be configured on the fly and is thus racy.
|
||||
*/
|
||||
/* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
|
||||
}
|
||||
|
||||
/*
|
||||
@ -641,9 +640,9 @@ struct cache_bin_ptr_array_s {
|
||||
* finish_fill call before doing any alloc/dalloc operations on the bin.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
|
||||
cache_bin_sz_t nfill) {
|
||||
cache_bin_assert_empty(bin);
|
||||
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
|
||||
cache_bin_assert_empty(bin, info);
|
||||
arr->ptr = cache_bin_empty_position_get(bin) - nfill;
|
||||
}
|
||||
|
||||
@ -653,9 +652,9 @@ cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
|
||||
* case of OOM.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
|
||||
cache_bin_sz_t nfilled) {
|
||||
cache_bin_assert_empty(bin);
|
||||
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
|
||||
cache_bin_assert_empty(bin, info);
|
||||
void **empty_position = cache_bin_empty_position_get(bin);
|
||||
if (nfilled < arr->n) {
|
||||
memmove(empty_position - nfilled, empty_position - arr->n,
|
||||
@ -669,17 +668,17 @@ cache_bin_finish_fill(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
|
||||
* everything we give them.
|
||||
*/
|
||||
static inline void
|
||||
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin,
|
||||
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
|
||||
arr->ptr = cache_bin_empty_position_get(bin) - nflush;
|
||||
assert(cache_bin_ncached_get_local(bin) == 0
|
||||
assert(cache_bin_ncached_get_local(bin, info) == 0
|
||||
|| *arr->ptr != NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
|
||||
cache_bin_sz_t nflushed) {
|
||||
unsigned rem = cache_bin_ncached_get_local(bin) - nflushed;
|
||||
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
|
||||
unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
|
||||
memmove(bin->stack_head + nflushed, bin->stack_head,
|
||||
rem * sizeof(void *));
|
||||
bin->stack_head += nflushed;
|
||||
@ -688,22 +687,23 @@ cache_bin_finish_flush(cache_bin_t *bin, cache_bin_ptr_array_t *arr,
|
||||
|
||||
static inline void
|
||||
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
|
||||
cache_bin_ptr_array_t *arr, cache_bin_sz_t nstashed) {
|
||||
cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
|
||||
cache_bin_sz_t nstashed) {
|
||||
assert(nstashed > 0);
|
||||
assert(cache_bin_nstashed_get_local(bin) == nstashed);
|
||||
assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
|
||||
|
||||
void **low_bound = cache_bin_low_bound_get(bin);
|
||||
void **low_bound = cache_bin_low_bound_get(bin, info);
|
||||
arr->ptr = low_bound;
|
||||
assert(*arr->ptr != NULL);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cache_bin_finish_flush_stashed(cache_bin_t *bin) {
|
||||
void **low_bound = cache_bin_low_bound_get(bin);
|
||||
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
|
||||
void **low_bound = cache_bin_low_bound_get(bin, info);
|
||||
|
||||
/* Reset the bin local full position. */
|
||||
bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
|
||||
assert(cache_bin_nstashed_get_local(bin) == 0);
|
||||
assert(cache_bin_nstashed_get_local(bin, info) == 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -716,8 +716,8 @@ void cache_bin_info_init(cache_bin_info_t *bin_info,
* Given an array of initialized cache_bin_info_ts, determine how big an
* allocation is required to initialize a full set of cache_bin_ts.
*/
void cache_bin_info_compute_alloc(const cache_bin_info_t *infos,
szind_t ninfos, size_t *size, size_t *alignment);
void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
size_t *size, size_t *alignment);

/*
* Actually initialize some cache bins. Callers should allocate the backing
@@ -726,11 +726,11 @@ void cache_bin_info_compute_alloc(const cache_bin_info_t *infos,
* cache_bin_postincrement. *alloc_cur will then point immediately past the end
* of the allocation.
*/
void cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos,
void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
void *alloc, size_t *cur_offset);
void cache_bin_postincrement(void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info,
void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
size_t *cur_offset);
void cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max);

bool cache_bin_stack_use_thp(void);
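Taken together, the declarations above imply the initialization protocol for a set of cache bins: compute the required size and alignment from the infos, allocate that much backing memory, then walk it with preincrement, one init per bin, and postincrement while a running offset tracks the cursor. A sketch of that call sequence against the 6b197fdd46 signatures; this is jemalloc-internal API, `nbins`, `bins`, `ncached_max_for()` and `allocate_aligned()` are hypothetical stand-ins, and error handling plus the THP stack path are omitted:

cache_bin_info_t infos[TCACHE_NBINS_MAX];
for (szind_t i = 0; i < nbins; i++) {
	cache_bin_info_init(&infos[i], ncached_max_for(i));
}

size_t size, alignment;
cache_bin_info_compute_alloc(infos, nbins, &size, &alignment);
void *mem = allocate_aligned(size, alignment);

size_t cur_offset = 0;
cache_bin_preincrement(infos, nbins, mem, &cur_offset);
for (szind_t i = 0; i < nbins; i++) {
	cache_bin_init(&bins[i], &infos[i], mem, &cur_offset);
}
cache_bin_postincrement(mem, &cur_offset);
/* cur_offset now points just past the end of the allocation. */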
|
||||
|
@ -14,7 +14,6 @@
|
||||
|
||||
/* Maximum ctl tree depth. */
|
||||
#define CTL_MAX_DEPTH 7
|
||||
#define CTL_MULTI_SETTING_MAX_LEN 1000
|
||||
|
||||
typedef struct ctl_node_s {
|
||||
bool named;
|
||||
|
@ -37,10 +37,8 @@
|
||||
/* Various function pointers are static and immutable except during testing. */
|
||||
#ifdef JEMALLOC_JET
|
||||
# define JET_MUTABLE
|
||||
# define JET_EXTERN extern
|
||||
#else
|
||||
# define JET_MUTABLE const
|
||||
# define JET_EXTERN static
|
||||
#endif
|
||||
|
||||
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
|
||||
|
@ -32,12 +32,6 @@ struct malloc_mutex_s {
|
||||
* unlocking thread).
|
||||
*/
|
||||
mutex_prof_data_t prof_data;
|
||||
/*
|
||||
* Hint flag to avoid exclusive cache line contention
|
||||
* during spin waiting. Placed along with prof_data
|
||||
* since it's always modified even with no contention.
|
||||
*/
|
||||
atomic_b_t locked;
|
||||
#ifdef _WIN32
|
||||
# if _WIN32_WINNT >= 0x0600
|
||||
SRWLOCK lock;
|
||||
@ -52,6 +46,11 @@ struct malloc_mutex_s {
|
||||
#else
|
||||
pthread_mutex_t lock;
|
||||
#endif
|
||||
/*
|
||||
* Hint flag to avoid exclusive cache line contention
|
||||
* during spin waiting
|
||||
*/
|
||||
atomic_b_t locked;
|
||||
};
|
||||
/*
|
||||
* We only touch witness when configured w/ debug. However we
|
||||
@ -100,21 +99,21 @@ struct malloc_mutex_s {
|
||||
#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
|
||||
# if defined(JEMALLOC_DEBUG)
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), OS_UNFAIR_LOCK_INIT}}, \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
|
||||
# if (defined(JEMALLOC_DEBUG))
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER, NULL}}, \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
|
||||
@ -122,11 +121,11 @@ struct malloc_mutex_s {
|
||||
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
|
||||
# if defined(JEMALLOC_DEBUG)
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT), 0}
|
||||
# else
|
||||
# define MALLOC_MUTEX_INITIALIZER \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, ATOMIC_INIT(false), PTHREAD_MUTEX_INITIALIZER}}, \
|
||||
{{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, ATOMIC_INIT(false)}}, \
|
||||
WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)}
|
||||
# endif
|
||||
#endif
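The locked field being moved around in the struct and initializers above is only a hint: spinners read it with a cheap atomic load so the cache line holding the real lock word is not pulled into exclusive state on every trylock attempt. A generic, self-contained illustration of that test-before-trylock pattern; this is not jemalloc's malloc_mutex_lock, and the names and pthread fallback are assumptions made for the sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

typedef struct {
	atomic_bool locked;       /* hint only; the mutex below is authoritative */
	pthread_mutex_t lock;
} hinted_mutex_t;

static void
hinted_mutex_lock(hinted_mutex_t *m, unsigned max_spins) {
	for (unsigned i = 0; i < max_spins; i++) {
		/* Relaxed load keeps the line shared while the owner holds it. */
		if (!atomic_load_explicit(&m->locked, memory_order_relaxed) &&
		    pthread_mutex_trylock(&m->lock) == 0) {
			atomic_store_explicit(&m->locked, true, memory_order_relaxed);
			return;
		}
	}
	pthread_mutex_lock(&m->lock);     /* give up spinning and block */
	atomic_store_explicit(&m->locked, true, memory_order_relaxed);
}

static void
hinted_mutex_unlock(hinted_mutex_t *m) {
	atomic_store_explicit(&m->locked, false, memory_order_relaxed);
	pthread_mutex_unlock(&m->lock);
}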
|
||||
|
@ -26,7 +26,7 @@ extern unsigned opt_lg_tcache_flush_large_div;
|
||||
* it should not be changed on the fly. To change the number of tcache bins
|
||||
* in use, refer to tcache_nbins of each tcache.
|
||||
*/
|
||||
extern unsigned global_do_not_change_tcache_nbins;
|
||||
extern unsigned global_do_not_change_nbins;
|
||||
|
||||
/*
|
||||
* Maximum cached size class. Same as above, this is only used during threads
|
||||
@ -55,11 +55,6 @@ void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache,
|
||||
cache_bin_t *cache_bin, szind_t binind, unsigned rem);
|
||||
void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache,
|
||||
cache_bin_t *cache_bin, szind_t binind, bool is_small);
|
||||
bool tcache_bin_info_default_init(const char *bin_settings_segment_cur,
|
||||
size_t len_left);
|
||||
bool tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len);
|
||||
bool tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
|
||||
cache_bin_sz_t *ncached_max);
|
||||
void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
|
||||
tcache_t *tcache, arena_t *arena);
|
||||
tcache_t *tcache_create_explicit(tsd_t *tsd);
|
||||
|
@ -46,7 +46,7 @@ tcache_bin_settings_backup(tcache_t *tcache,
|
||||
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
|
||||
for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) {
|
||||
cache_bin_info_init(&tcache_bin_info[i],
|
||||
cache_bin_ncached_max_get_unsafe(&tcache->bins[i]));
|
||||
tcache->bins[i].bin_info.ncached_max);
|
||||
}
|
||||
}
|
||||
|
||||
@ -54,7 +54,6 @@ JEMALLOC_ALWAYS_INLINE bool
|
||||
tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
|
||||
tcache_slow_t *tcache_slow) {
|
||||
assert(bin != NULL);
|
||||
assert(ind < TCACHE_NBINS_MAX);
|
||||
bool disabled = cache_bin_disabled(bin);
|
||||
|
||||
/*
|
||||
@ -67,7 +66,7 @@ tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
|
||||
* ind < nbins and ncached_max > 0.
|
||||
*/
|
||||
unsigned nbins = tcache_nbins_get(tcache_slow);
|
||||
cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin);
|
||||
cache_bin_sz_t ncached_max = bin->bin_info.ncached_max;
|
||||
if (ind >= nbins) {
|
||||
assert(disabled);
|
||||
} else {
|
||||
@ -200,7 +199,8 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
|
||||
arena_dalloc_small(tsd_tsdn(tsd), ptr);
|
||||
return;
|
||||
}
|
||||
cache_bin_sz_t max = cache_bin_ncached_max_get(bin);
|
||||
cache_bin_sz_t max = cache_bin_info_ncached_max_get(
|
||||
bin, &bin->bin_info);
|
||||
unsigned remain = max >> opt_lg_tcache_flush_small_div;
|
||||
tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
|
||||
bool ret = cache_bin_dalloc_easy(bin, ptr);
|
||||
@ -215,13 +215,11 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
|
||||
assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
|
||||
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <=
|
||||
tcache_max_get(tcache->tcache_slow));
|
||||
assert(!tcache_bin_disabled(binind, &tcache->bins[binind],
|
||||
tcache->tcache_slow));
|
||||
|
||||
cache_bin_t *bin = &tcache->bins[binind];
|
||||
if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
|
||||
unsigned remain = cache_bin_ncached_max_get(bin) >>
|
||||
opt_lg_tcache_flush_large_div;
|
||||
unsigned remain = cache_bin_info_ncached_max_get(
|
||||
bin, &bin->bin_info) >> opt_lg_tcache_flush_large_div;
|
||||
tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
|
||||
bool ret = cache_bin_dalloc_easy(bin, ptr);
|
||||
assert(ret);
|
||||
|
@ -10,7 +10,7 @@ typedef struct tcaches_s tcaches_t;
|
||||
|
||||
/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
|
||||
#define TCACHE_ZERO_INITIALIZER {0}
|
||||
#define TCACHE_SLOW_ZERO_INITIALIZER {{0}}
|
||||
#define TCACHE_SLOW_ZERO_INITIALIZER {0}
|
||||
|
||||
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
|
||||
#define TCACHE_ENABLED_ZERO_INITIALIZER false
|
||||
|
@@ -130,12 +130,4 @@ util_prefetch_write_range(void *ptr, size_t sz) {

#undef UTIL_INLINE

/*
* Reads the settings in the following format:
* key1-key2:value|key3-key4:value|...
* Note it does not handle the ending '\0'.
*/
bool
multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
size_t *key_start, size_t *key_end, size_t *value);
#endif /* JEMALLOC_INTERNAL_UTIL_H */
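The comment above pins down the grammar: a '|'-separated list of key1-key2:value segments with no terminating NUL handled, and the parser returns true on a malformed segment. A short caller sketch; the settings string and the strlen-based length are hypothetical, and in the tcache code the keys are byte size classes while the value is an ncached_max:

const char *cur = "1-8:100|16-32:50";   /* hypothetical settings string */
size_t len_left = strlen(cur);
while (len_left > 0) {
	size_t key_start, key_end, value;
	if (multi_setting_parse_next(&cur, &len_left, &key_start, &key_end,
	    &value)) {
		break;   /* malformed segment */
	}
	/* first pass: key_start == 1, key_end == 8, value == 100 */
}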
|
||||
|
@ -96,7 +96,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\thread_event.c" />
|
||||
<ClCompile Include="..\..\..\..\src\ticker.c" />
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c" />
|
||||
<ClCompile Include="..\..\..\..\src\util.c" />
|
||||
<ClCompile Include="..\..\..\..\src\witness.c" />
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
|
@ -166,9 +166,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\util.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\witness.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
|
@ -96,7 +96,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\thread_event.c" />
|
||||
<ClCompile Include="..\..\..\..\src\ticker.c" />
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c" />
|
||||
<ClCompile Include="..\..\..\..\src\util.c" />
|
||||
<ClCompile Include="..\..\..\..\src\witness.c" />
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
|
@ -166,9 +166,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\util.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\witness.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
|
@ -96,7 +96,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\thread_event.c" />
|
||||
<ClCompile Include="..\..\..\..\src\ticker.c" />
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c" />
|
||||
<ClCompile Include="..\..\..\..\src\util.c" />
|
||||
<ClCompile Include="..\..\..\..\src\witness.c" />
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
|
@ -166,9 +166,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\util.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\witness.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
|
@ -96,7 +96,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\thread_event.c" />
|
||||
<ClCompile Include="..\..\..\..\src\ticker.c" />
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c" />
|
||||
<ClCompile Include="..\..\..\..\src\util.c" />
|
||||
<ClCompile Include="..\..\..\..\src\witness.c" />
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
|
@ -166,9 +166,6 @@
|
||||
<ClCompile Include="..\..\..\..\src\tsd.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\util.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
<ClCompile Include="..\..\..\..\src\witness.c">
|
||||
<Filter>Source Files</Filter>
|
||||
</ClCompile>
|
||||
|
src/arena.c (21 changes)
@ -168,7 +168,8 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
|
||||
}
|
||||
|
||||
cache_bin_sz_t ncached, nstashed;
|
||||
cache_bin_nitems_get_remote(cache_bin, &ncached, &nstashed);
|
||||
cache_bin_nitems_get_remote(cache_bin,
|
||||
&cache_bin->bin_info, &ncached, &nstashed);
|
||||
astats->tcache_bytes += ncached * sz_index2size(i);
|
||||
astats->tcache_stashed_bytes += nstashed *
|
||||
sz_index2size(i);
|
||||
@ -1019,14 +1020,15 @@ arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
|
||||
|
||||
void
|
||||
arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
|
||||
cache_bin_t *cache_bin, szind_t binind, const unsigned nfill) {
|
||||
assert(cache_bin_ncached_get_local(cache_bin) == 0);
|
||||
assert(nfill != 0);
|
||||
cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
|
||||
const unsigned nfill) {
|
||||
assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
|
||||
|
||||
const bin_info_t *bin_info = &bin_infos[binind];
|
||||
|
||||
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
|
||||
cache_bin_init_ptr_array_for_fill(cache_bin, &ptrs, nfill);
|
||||
cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
|
||||
nfill);
|
||||
/*
|
||||
* Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
|
||||
* slabs. After both are exhausted, new slabs will be allocated through
|
||||
@ -1140,7 +1142,7 @@ label_refill:
|
||||
fresh_slab = NULL;
|
||||
}
|
||||
|
||||
cache_bin_finish_fill(cache_bin, &ptrs, filled);
|
||||
cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
|
||||
arena_decay_tick(tsdn, arena);
|
||||
}
|
||||
|
||||
@ -1663,16 +1665,11 @@ arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
|
||||
}
|
||||
}
|
||||
|
||||
size_t arena_size = ALIGNMENT_CEILING(sizeof(arena_t), CACHELINE) +
|
||||
sizeof(bin_t) * nbins_total;
|
||||
size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
|
||||
arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
|
||||
if (arena == NULL) {
|
||||
goto label_error;
|
||||
}
|
||||
JEMALLOC_SUPPRESS_WARN_ON_USAGE(
|
||||
assert((uintptr_t)&arena->all_bins[nbins_total -1] + sizeof(bin_t) <=
|
||||
(uintptr_t)arena + arena_size);
|
||||
)
|
||||
|
||||
atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
|
||||
atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
|
||||
|
@ -10,7 +10,6 @@ const uintptr_t disabled_bin = JUNK_ADDR;
|
||||
void
|
||||
cache_bin_info_init(cache_bin_info_t *info,
|
||||
cache_bin_sz_t ncached_max) {
|
||||
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
|
||||
size_t stack_size = (size_t)ncached_max * sizeof(void *);
|
||||
assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
|
||||
info->ncached_max = (cache_bin_sz_t)ncached_max;
|
||||
@ -28,7 +27,7 @@ cache_bin_stack_use_thp(void) {
|
||||
}
|
||||
|
||||
void
|
||||
cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos,
|
||||
cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
|
||||
size_t *size, size_t *alignment) {
|
||||
/* For the total bin stack region (per tcache), reserve 2 more slots so
|
||||
* that
|
||||
@ -51,7 +50,7 @@ cache_bin_info_compute_alloc(const cache_bin_info_t *infos, szind_t ninfos,
|
||||
}
|
||||
|
||||
void
|
||||
cache_bin_preincrement(const cache_bin_info_t *infos, szind_t ninfos, void *alloc,
|
||||
cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
|
||||
size_t *cur_offset) {
|
||||
if (config_debug) {
|
||||
size_t computed_size;
|
||||
@ -76,7 +75,7 @@ cache_bin_postincrement(void *alloc, size_t *cur_offset) {
|
||||
}
|
||||
|
||||
void
|
||||
cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
|
||||
cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
|
||||
size_t *cur_offset) {
|
||||
/*
|
||||
* The full_position points to the lowest available space. Allocations
|
||||
@ -100,7 +99,7 @@ cache_bin_init(cache_bin_t *bin, const cache_bin_info_t *info, void *alloc,
|
||||
bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head);
|
||||
assert(free_spots == bin_stack_size);
|
||||
if (!cache_bin_disabled(bin)) {
|
||||
assert(cache_bin_ncached_get_local(bin) == 0);
|
||||
assert(cache_bin_ncached_get_local(bin, &bin->bin_info) == 0);
|
||||
}
|
||||
assert(cache_bin_empty_position_get(bin) == empty_position);
|
||||
|
||||
|
src/ctl.c (85 changes)
@ -68,8 +68,6 @@ CTL_PROTO(max_background_threads)
|
||||
CTL_PROTO(thread_tcache_enabled)
|
||||
CTL_PROTO(thread_tcache_max)
|
||||
CTL_PROTO(thread_tcache_flush)
|
||||
CTL_PROTO(thread_tcache_ncached_max_write)
|
||||
CTL_PROTO(thread_tcache_ncached_max_read_sizeclass)
|
||||
CTL_PROTO(thread_peak_read)
|
||||
CTL_PROTO(thread_peak_reset)
|
||||
CTL_PROTO(thread_prof_name)
|
||||
@ -376,17 +374,10 @@ CTL_PROTO(stats_mutexes_reset)
|
||||
*/
|
||||
#define INDEX(i) {false}, i##_index
|
||||
|
||||
static const ctl_named_node_t thread_tcache_ncached_max_node[] = {
|
||||
{NAME("read_sizeclass"),
|
||||
CTL(thread_tcache_ncached_max_read_sizeclass)},
|
||||
{NAME("write"), CTL(thread_tcache_ncached_max_write)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t thread_tcache_node[] = {
|
||||
{NAME("enabled"), CTL(thread_tcache_enabled)},
|
||||
{NAME("max"), CTL(thread_tcache_max)},
|
||||
{NAME("flush"), CTL(thread_tcache_flush)},
|
||||
{NAME("ncached_max"), CHILD(named, thread_tcache_ncached_max)}
|
||||
{NAME("flush"), CTL(thread_tcache_flush)}
|
||||
};
|
||||
|
||||
static const ctl_named_node_t thread_peak_node[] = {
|
||||
@ -2291,78 +2282,6 @@ label_return:
|
||||
|
||||
CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
|
||||
CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
|
||||
|
||||
static int
|
||||
thread_tcache_ncached_max_read_sizeclass_ctl(tsd_t *tsd, const size_t *mib,
|
||||
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
|
||||
size_t newlen) {
|
||||
int ret;
|
||||
size_t bin_size = 0;
|
||||
|
||||
/* Read the bin size from newp. */
|
||||
if (newp == NULL) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
WRITE(bin_size, size_t);
|
||||
|
||||
cache_bin_sz_t ncached_max = 0;
|
||||
if (tcache_bin_ncached_max_read(tsd, bin_size, &ncached_max)) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
size_t result = (size_t)ncached_max;
|
||||
READ(result, size_t);
|
||||
ret = 0;
|
||||
label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
thread_tcache_ncached_max_write_ctl(tsd_t *tsd, const size_t *mib,
|
||||
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
|
||||
size_t newlen) {
|
||||
int ret;
|
||||
WRITEONLY();
|
||||
if (newp != NULL) {
|
||||
if (!tcache_available(tsd)) {
|
||||
ret = ENOENT;
|
||||
goto label_return;
|
||||
}
|
||||
char *settings = NULL;
|
||||
WRITE(settings, char *);
|
||||
if (settings == NULL) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
/* Get the length of the setting string safely. */
|
||||
char *end = (char *)memchr(settings, '\0',
|
||||
CTL_MULTI_SETTING_MAX_LEN);
|
||||
if (end == NULL) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
/*
|
||||
* Exclude the last '\0' for len since it is not handled by
|
||||
* multi_setting_parse_next.
|
||||
*/
|
||||
size_t len = (uintptr_t)end - (uintptr_t)settings;
|
||||
if (len == 0) {
|
||||
ret = 0;
|
||||
goto label_return;
|
||||
}
|
||||
|
||||
if (tcache_bins_ncached_max_write(tsd, settings, len)) {
|
||||
ret = EINVAL;
|
||||
goto label_return;
|
||||
}
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
label_return:
|
||||
return ret;
|
||||
}
|
||||
|
||||
CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
|
||||
CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
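The two handlers removed above back the thread.tcache.ncached_max.write and thread.tcache.ncached_max.read_sizeclass mallctls: write takes a '|'-separated size_start-size_end:ncached_max settings string (its length bounded by CTL_MULTI_SETTING_MAX_LEN), and read_sizeclass maps a byte size class passed via newp to that bin's current ncached_max. A hypothetical caller, assuming these mallctls are present as on the e4817c8d89 side of this comparison:

const char *settings = "0-4096:100|8192-32768:20";   /* hypothetical values */
mallctl("thread.tcache.ncached_max.write", NULL, NULL,
    (void *)&settings, sizeof(settings));

size_t bin_size = 4096, ncached_max, sz = sizeof(ncached_max);
mallctl("thread.tcache.ncached_max.read_sizeclass", &ncached_max, &sz,
    &bin_size, sizeof(bin_size));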
|
||||
|
||||
@ -3236,7 +3155,7 @@ CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
|
||||
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
|
||||
CTL_RO_NL_GEN(arenas_tcache_max, global_do_not_change_tcache_maxclass, size_t)
|
||||
CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
|
||||
CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_tcache_nbins, unsigned)
|
||||
CTL_RO_NL_GEN(arenas_nhbins, global_do_not_change_nbins, unsigned)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
|
||||
CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
|
||||
|
@ -821,6 +821,50 @@ init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
|
||||
assert(opts_len == strlen(dest));
|
||||
}
|
||||
|
||||
/* Reads the next size pair in a multi-sized option. */
|
||||
static bool
|
||||
malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
|
||||
size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
|
||||
const char *cur = *slab_size_segment_cur;
|
||||
char *end;
|
||||
uintmax_t um;
|
||||
|
||||
set_errno(0);
|
||||
|
||||
/* First number, then '-' */
|
||||
um = malloc_strtoumax(cur, &end, 0);
|
||||
if (get_errno() != 0 || *end != '-') {
|
||||
return true;
|
||||
}
|
||||
*slab_start = (size_t)um;
|
||||
cur = end + 1;
|
||||
|
||||
/* Second number, then ':' */
|
||||
um = malloc_strtoumax(cur, &end, 0);
|
||||
if (get_errno() != 0 || *end != ':') {
|
||||
return true;
|
||||
}
|
||||
*slab_end = (size_t)um;
|
||||
cur = end + 1;
|
||||
|
||||
/* Last number */
|
||||
um = malloc_strtoumax(cur, &end, 0);
|
||||
if (get_errno() != 0) {
|
||||
return true;
|
||||
}
|
||||
*new_size = (size_t)um;
|
||||
|
||||
/* Consume the separator if there is one. */
|
||||
if (*end == '|') {
|
||||
end++;
|
||||
}
|
||||
|
||||
*vlen_left -= end - *slab_size_segment_cur;
|
||||
*slab_size_segment_cur = end;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
malloc_conf_format_error(const char *msg, const char *begin, const char *end) {
|
||||
size_t len = end - begin + 1;
|
||||
@ -1307,7 +1351,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
size_t size_start;
|
||||
size_t size_end;
|
||||
size_t nshards;
|
||||
bool err = multi_setting_parse_next(
|
||||
bool err = malloc_conf_multi_sizes_next(
|
||||
&bin_shards_segment_cur, &vlen_left,
|
||||
&size_start, &size_end, &nshards);
|
||||
if (err || bin_update_shard_size(
|
||||
@ -1322,16 +1366,6 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
} while (vlen_left > 0);
|
||||
CONF_CONTINUE;
|
||||
}
|
||||
if (CONF_MATCH("tcache_ncached_max")) {
|
||||
bool err = tcache_bin_info_default_init(
|
||||
v, vlen);
|
||||
if (err) {
|
||||
CONF_ERROR("Invalid settings for "
|
||||
"tcache_ncached_max", k, klen, v,
|
||||
vlen);
|
||||
}
|
||||
CONF_CONTINUE;
|
||||
}
|
||||
CONF_HANDLE_INT64_T(opt_mutex_max_spin,
|
||||
"mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
|
||||
CONF_DONT_CHECK_MAX, false);
|
||||
@ -1579,7 +1613,7 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
|
||||
size_t slab_start;
|
||||
size_t slab_end;
|
||||
size_t pgs;
|
||||
err = multi_setting_parse_next(
|
||||
err = malloc_conf_multi_sizes_next(
|
||||
&slab_size_segment_cur,
|
||||
&vlen_left, &slab_start, &slab_end,
|
||||
&pgs);
|
||||
@ -4106,7 +4140,6 @@ batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
|
||||
tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
|
||||
/* slow */ true, /* is_alloc */ true);
|
||||
if (likely(tcache != NULL &&
|
||||
ind < tcache_nbins_get(tcache->tcache_slow) &&
|
||||
!tcache_bin_disabled(ind, &tcache->bins[ind],
|
||||
tcache->tcache_slow)) && progress < batch) {
|
||||
if (bin == NULL) {
|
||||
|
src/tcache.c (231 changes)
@ -63,7 +63,7 @@ unsigned opt_lg_tcache_flush_large_div = 1;
|
||||
* is only used to initialize tcache_nbins in the per-thread tcache.
|
||||
* Directly modifying it will not affect threads already launched.
|
||||
*/
|
||||
unsigned global_do_not_change_tcache_nbins;
|
||||
unsigned global_do_not_change_nbins;
|
||||
/*
|
||||
* Max size class to be cached (can be small or large). This value is only used
|
||||
* to initialize tcache_max in the per-thread tcache. Directly modifying it
|
||||
@ -71,18 +71,6 @@ unsigned global_do_not_change_tcache_nbins;
|
||||
*/
|
||||
size_t global_do_not_change_tcache_maxclass;
|
||||
|
||||
/*
|
||||
* Default bin info for each bin. Will be initialized in malloc_conf_init
|
||||
* and tcache_boot and should not be modified after that.
|
||||
*/
|
||||
static cache_bin_info_t opt_tcache_ncached_max[TCACHE_NBINS_MAX] = {{0}};
|
||||
/*
|
||||
* Marks whether a bin's info is set already. This is used in
|
||||
* tcache_bin_info_compute to avoid overwriting ncached_max specified by
|
||||
* malloc_conf. It should be set only when parsing malloc_conf.
|
||||
*/
|
||||
static bool opt_tcache_ncached_max_set[TCACHE_NBINS_MAX] = {0};
|
||||
|
||||
tcaches_t *tcaches;
|
||||
|
||||
/* Index of first element within tcaches that has never been used. */
|
||||
@ -142,8 +130,10 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
|
||||
|
||||
cache_bin_t *cache_bin = &tcache->bins[szind];
|
||||
assert(!tcache_bin_disabled(szind, cache_bin, tcache->tcache_slow));
|
||||
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin);
|
||||
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
|
||||
&cache_bin->bin_info);
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
|
||||
&cache_bin->bin_info);
|
||||
assert(!tcache_slow->bin_refilled[szind]);
|
||||
|
||||
size_t nflush = low_water - (low_water >> 2);
|
||||
@ -166,8 +156,8 @@ tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
|
||||
* Reduce fill count by 2X. Limit lg_fill_div such that
|
||||
* the fill count is always at least 1.
|
||||
*/
|
||||
if ((cache_bin_ncached_max_get(cache_bin) >>
|
||||
(tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
|
||||
if ((cache_bin_info_ncached_max_get(cache_bin, &cache_bin->bin_info)
|
||||
>> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
|
||||
tcache_slow->lg_fill_div[szind]++;
|
||||
}
|
||||
}
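The guard above, combined with tcache_alloc_small_hard computing nfill as ncached_max shifted right by lg_fill_div, means each GC pass that finds the bin over-provisioned halves the next refill, but never drives it below a single item. With a hypothetical ncached_max of 64:

unsigned nfill_before = 64 >> 1;   /* lg_fill_div == 1: refill with 32 items */
unsigned nfill_after  = 64 >> 2;   /* after one GC bump: refill with 16 items */
/* (64 >> (lg_fill_div + 1)) >= 1 fails once lg_fill_div reaches 6,
 * so the divisor stops growing and the refill bottoms out at 1 item. */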
|
||||
@ -179,8 +169,10 @@ tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
|
||||
assert(szind >= SC_NBINS);
|
||||
cache_bin_t *cache_bin = &tcache->bins[szind];
|
||||
assert(!tcache_bin_disabled(szind, cache_bin, tcache->tcache_slow));
|
||||
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin);
|
||||
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
|
||||
&cache_bin->bin_info);
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
|
||||
&cache_bin->bin_info);
|
||||
tcache_bin_flush_large(tsd, tcache, cache_bin, szind,
|
||||
(unsigned)(ncached - low_water + (low_water >> 2)));
|
||||
}
|
||||
@ -201,8 +193,10 @@ tcache_event(tsd_t *tsd) {
|
||||
goto label_done;
|
||||
}
|
||||
|
||||
tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin);
|
||||
tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind,
|
||||
is_small);
|
||||
cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
|
||||
&cache_bin->bin_info);
|
||||
if (low_water > 0) {
|
||||
if (is_small) {
|
||||
tcache_gc_small(tsd, tcache_slow, tcache, szind);
|
||||
@ -250,12 +244,10 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
|
||||
|
||||
assert(tcache_slow->arena != NULL);
|
||||
assert(!tcache_bin_disabled(binind, cache_bin, tcache_slow));
|
||||
unsigned nfill = cache_bin_ncached_max_get(cache_bin)
|
||||
>> tcache_slow->lg_fill_div[binind];
|
||||
if (nfill == 0) {
|
||||
nfill = 1;
|
||||
}
|
||||
arena_cache_bin_fill_small(tsdn, arena, cache_bin, binind, nfill);
|
||||
unsigned nfill = cache_bin_info_ncached_max_get(cache_bin,
|
||||
&cache_bin->bin_info) >> tcache_slow->lg_fill_div[binind];
|
||||
arena_cache_bin_fill_small(tsdn, arena, cache_bin,
|
||||
&cache_bin->bin_info, binind, nfill);
|
||||
tcache_slow->bin_refilled[binind] = true;
|
||||
ret = cache_bin_alloc(cache_bin, tcache_success);
|
||||
|
||||
@ -527,17 +519,20 @@ tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
|
||||
assert(!tcache_bin_disabled(binind, cache_bin, tcache->tcache_slow));
|
||||
tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
|
||||
|
||||
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
|
||||
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
|
||||
&cache_bin->bin_info);
|
||||
assert((cache_bin_sz_t)rem <= ncached);
|
||||
unsigned nflush = ncached - rem;
|
||||
|
||||
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
|
||||
cache_bin_init_ptr_array_for_flush(cache_bin, &ptrs, nflush);
|
||||
cache_bin_init_ptr_array_for_flush(cache_bin, &cache_bin->bin_info,
|
||||
&ptrs, nflush);
|
||||
|
||||
tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
|
||||
small);
|
||||
|
||||
cache_bin_finish_flush(cache_bin, &ptrs, ncached - rem);
|
||||
cache_bin_finish_flush(cache_bin, &cache_bin->bin_info, &ptrs,
|
||||
ncached - rem);
|
||||
}
|
||||
|
||||
void
|
||||
@ -566,65 +561,36 @@ void
|
||||
tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
|
||||
szind_t binind, bool is_small) {
|
||||
assert(!tcache_bin_disabled(binind, cache_bin, tcache->tcache_slow));
|
||||
cache_bin_info_t *info = &cache_bin->bin_info;
|
||||
/*
|
||||
* The two below are for assertion only. The content of original cached
|
||||
* items remain unchanged -- the stashed items reside on the other end
|
||||
* of the stack. Checking the stack head and ncached to verify.
|
||||
*/
|
||||
void *head_content = *cache_bin->stack_head;
|
||||
cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin);
|
||||
cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
|
||||
info);
|
||||
|
||||
cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin);
|
||||
assert(orig_cached + nstashed <= cache_bin_ncached_max_get(cache_bin));
|
||||
cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info);
|
||||
assert(orig_cached + nstashed <=
|
||||
cache_bin_info_ncached_max_get(cache_bin, info));
|
||||
if (nstashed == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
|
||||
cache_bin_init_ptr_array_for_stashed(cache_bin, binind, &ptrs,
|
||||
cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs,
|
||||
nstashed);
|
||||
san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind));
|
||||
tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
|
||||
is_small);
|
||||
cache_bin_finish_flush_stashed(cache_bin);
|
||||
cache_bin_finish_flush_stashed(cache_bin, info);
|
||||
|
||||
assert(cache_bin_nstashed_get_local(cache_bin) == 0);
|
||||
assert(cache_bin_ncached_get_local(cache_bin) == orig_cached);
|
||||
assert(cache_bin_nstashed_get_local(cache_bin, info) == 0);
|
||||
assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
|
||||
assert(head_content == *cache_bin->stack_head);
|
||||
}
|
||||
|
||||
JET_EXTERN bool
|
||||
tcache_get_default_ncached_max_set(szind_t ind) {
|
||||
return opt_tcache_ncached_max_set[ind];
|
||||
}
|
||||
|
||||
JET_EXTERN const cache_bin_info_t *
|
||||
tcache_get_default_ncached_max(void) {
|
||||
return opt_tcache_ncached_max;
|
||||
}
|
||||
|
||||
bool
|
||||
tcache_bin_ncached_max_read(tsd_t *tsd, size_t bin_size,
|
||||
cache_bin_sz_t *ncached_max) {
|
||||
if (bin_size > TCACHE_MAXCLASS_LIMIT) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!tcache_available(tsd)) {
|
||||
*ncached_max = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
tcache_t *tcache = tsd_tcachep_get(tsd);
|
||||
assert(tcache != NULL);
|
||||
szind_t bin_ind = sz_size2index(bin_size);
|
||||
|
||||
cache_bin_t *bin = &tcache->bins[bin_ind];
|
||||
*ncached_max = tcache_bin_disabled(bin_ind, bin, tcache->tcache_slow) ?
|
||||
0: cache_bin_ncached_max_get(bin);
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
|
||||
tcache_t *tcache, arena_t *arena) {
|
||||
@ -685,13 +651,13 @@ static void
|
||||
tcache_default_settings_init(tcache_slow_t *tcache_slow) {
|
||||
assert(tcache_slow != NULL);
|
||||
assert(global_do_not_change_tcache_maxclass != 0);
|
||||
assert(global_do_not_change_tcache_nbins != 0);
|
||||
tcache_slow->tcache_nbins = global_do_not_change_tcache_nbins;
|
||||
assert(global_do_not_change_nbins != 0);
|
||||
tcache_slow->tcache_nbins = global_do_not_change_nbins;
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
|
||||
void *mem, const cache_bin_info_t *tcache_bin_info) {
|
||||
void *mem, cache_bin_info_t *tcache_bin_info) {
|
||||
tcache->tcache_slow = tcache_slow;
|
||||
tcache_slow->tcache = tcache;
|
||||
|
||||
@ -806,16 +772,14 @@ tcache_ncached_max_compute(szind_t szind) {
|
||||
}
|
||||
}
|
||||
|
||||
JET_EXTERN void
|
||||
static void
|
||||
tcache_bin_info_compute(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
|
||||
/*
|
||||
* Compute the values for each bin, but for bins with indices larger
|
||||
* than tcache_nbins, no items will be cached.
|
||||
*/
|
||||
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
|
||||
unsigned ncached_max = tcache_get_default_ncached_max_set(i) ?
|
||||
(unsigned)tcache_get_default_ncached_max()[i].ncached_max:
|
||||
tcache_ncached_max_compute(i);
|
||||
unsigned ncached_max = tcache_ncached_max_compute(i);
|
||||
assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
|
||||
cache_bin_info_init(&tcache_bin_info[i], ncached_max);
|
||||
}
|
||||
@ -823,7 +787,7 @@ tcache_bin_info_compute(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
|
||||
|
||||
static bool
|
||||
tsd_tcache_data_init_impl(tsd_t *tsd, arena_t *arena,
|
||||
const cache_bin_info_t *tcache_bin_info) {
|
||||
cache_bin_info_t *tcache_bin_info) {
|
||||
tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
|
||||
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
|
||||
|
||||
@ -877,11 +841,20 @@ tsd_tcache_data_init_impl(tsd_t *tsd, arena_t *arena,
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool
|
||||
tsd_tcache_data_init_with_bin_settings(tsd_t *tsd, arena_t *arena,
|
||||
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
|
||||
assert(tcache_bin_info != NULL);
|
||||
return tsd_tcache_data_init_impl(tsd, arena, tcache_bin_info);
|
||||
}
|
||||
|
||||
/* Initialize auto tcache (embedded in TSD). */
|
||||
static bool
|
||||
tsd_tcache_data_init(tsd_t *tsd, arena_t *arena,
|
||||
const cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
|
||||
assert(tcache_bin_info != NULL);
|
||||
tsd_tcache_data_init(tsd_t *tsd, arena_t *arena) {
|
||||
/* Takes 146B stack space. */
|
||||
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {{0}};
|
||||
tcache_bin_info_compute(tcache_bin_info);
|
||||
|
||||
return tsd_tcache_data_init_impl(tsd, arena, tcache_bin_info);
|
||||
}
|
||||
|
||||
@ -893,10 +866,12 @@ tcache_create_explicit(tsd_t *tsd) {
|
||||
* the beginning of the whole allocation (for freeing). The makes sure
|
||||
* the cache bins have the requested alignment.
|
||||
*/
|
||||
unsigned tcache_nbins = global_do_not_change_tcache_nbins;
|
||||
unsigned tcache_nbins = global_do_not_change_nbins;
|
||||
size_t tcache_size, alignment;
|
||||
cache_bin_info_compute_alloc(tcache_get_default_ncached_max(),
|
||||
tcache_nbins, &tcache_size, &alignment);
|
||||
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX] = {{0}};
|
||||
tcache_bin_info_compute(tcache_bin_info);
|
||||
cache_bin_info_compute_alloc(tcache_bin_info, tcache_nbins,
|
||||
&tcache_size, &alignment);
|
||||
|
||||
size_t size = tcache_size + sizeof(tcache_t)
|
||||
+ sizeof(tcache_slow_t);
|
||||
@ -913,8 +888,7 @@ tcache_create_explicit(tsd_t *tsd) {
|
||||
tcache_slow_t *tcache_slow =
|
||||
(void *)((byte_t *)mem + tcache_size + sizeof(tcache_t));
|
||||
tcache_default_settings_init(tcache_slow);
|
||||
tcache_init(tsd, tcache_slow, tcache, mem,
|
||||
tcache_get_default_ncached_max());
|
||||
tcache_init(tsd, tcache_slow, tcache, mem, tcache_bin_info);
|
||||
|
||||
tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
|
||||
arena_ichoose(tsd, NULL));
|
||||
@ -935,8 +909,7 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) {
|
||||
|
||||
if (opt_tcache) {
|
||||
/* Trigger tcache init. */
|
||||
tsd_tcache_data_init(tsd, NULL,
|
||||
tcache_get_default_ncached_max());
|
||||
tsd_tcache_data_init(tsd, NULL);
|
||||
}
|
||||
|
||||
return false;
|
||||
@ -947,8 +920,7 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
|
||||
bool was_enabled = tsd_tcache_enabled_get(tsd);
|
||||
|
||||
if (!was_enabled && enabled) {
|
||||
tsd_tcache_data_init(tsd, NULL,
|
||||
tcache_get_default_ncached_max());
|
||||
tsd_tcache_data_init(tsd, NULL);
|
||||
} else if (was_enabled && !enabled) {
|
||||
tcache_cleanup(tsd);
|
||||
}
|
||||
@ -984,79 +956,13 @@ thread_tcache_max_set(tsd_t *tsd, size_t tcache_max) {
|
||||
tcache_max_set(tcache_slow, tcache_max);
|
||||
|
||||
if (enabled) {
|
||||
tsd_tcache_data_init(tsd, assigned_arena, tcache_bin_info);
|
||||
tsd_tcache_data_init_with_bin_settings(tsd, assigned_arena,
|
||||
tcache_bin_info);
|
||||
}
|
||||
|
||||
assert(tcache_nbins_get(tcache_slow) == sz_size2index(tcache_max) + 1);
|
||||
}
|
||||
|
||||
static bool
|
||||
tcache_bin_info_settings_parse(const char *bin_settings_segment_cur,
|
||||
size_t len_left, cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX],
|
||||
bool bin_info_is_set[TCACHE_NBINS_MAX]) {
|
||||
do {
|
||||
size_t size_start, size_end;
|
||||
size_t ncached_max;
|
||||
bool err = multi_setting_parse_next(&bin_settings_segment_cur,
|
||||
&len_left, &size_start, &size_end, &ncached_max);
|
||||
if (err) {
|
||||
return true;
|
||||
}
|
||||
if (size_end > TCACHE_MAXCLASS_LIMIT) {
|
||||
size_end = TCACHE_MAXCLASS_LIMIT;
|
||||
}
|
||||
if (size_start > TCACHE_MAXCLASS_LIMIT ||
|
||||
size_start > size_end) {
|
||||
continue;
|
||||
}
|
||||
/* May get called before sz_init (during malloc_conf_init). */
|
||||
szind_t bin_start = sz_size2index_compute(size_start);
|
||||
szind_t bin_end = sz_size2index_compute(size_end);
|
||||
if (ncached_max > CACHE_BIN_NCACHED_MAX) {
|
||||
ncached_max = (size_t)CACHE_BIN_NCACHED_MAX;
|
||||
}
|
||||
for (szind_t i = bin_start; i <= bin_end; i++) {
|
||||
cache_bin_info_init(&tcache_bin_info[i],
|
||||
(cache_bin_sz_t)ncached_max);
|
||||
if (bin_info_is_set != NULL) {
|
||||
bin_info_is_set[i] = true;
|
||||
}
|
||||
}
|
||||
} while (len_left > 0);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool
|
||||
tcache_bin_info_default_init(const char *bin_settings_segment_cur,
|
||||
size_t len_left) {
|
||||
return tcache_bin_info_settings_parse(bin_settings_segment_cur,
|
||||
len_left, opt_tcache_ncached_max, opt_tcache_ncached_max_set);
|
||||
}
|
||||
|
||||
|
||||
bool
|
||||
tcache_bins_ncached_max_write(tsd_t *tsd, char *settings, size_t len) {
|
||||
assert(tcache_available(tsd));
|
||||
assert(len != 0);
|
||||
tcache_t *tcache = tsd_tcachep_get(tsd);
|
||||
assert(tcache != NULL);
|
||||
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX];
|
||||
tcache_bin_settings_backup(tcache, tcache_bin_info);
|
||||
|
||||
if(tcache_bin_info_settings_parse(settings, len, tcache_bin_info,
|
||||
NULL)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
arena_t *assigned_arena = tcache->tcache_slow->arena;
|
||||
tcache_cleanup(tsd);
|
||||
tsd_tcache_data_init(tsd, assigned_arena,
|
||||
tcache_bin_info);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void
|
||||
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
|
||||
tcache_slow_t *tcache_slow = tcache->tcache_slow;
|
||||
@ -1093,7 +999,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
|
||||
|
||||
if (tsd_tcache) {
|
||||
cache_bin_t *cache_bin = &tcache->bins[0];
|
||||
cache_bin_assert_empty(cache_bin);
|
||||
cache_bin_assert_empty(cache_bin, &cache_bin->bin_info);
|
||||
}
|
||||
if (tsd_tcache && cache_bin_stack_use_thp()) {
|
||||
b0_dalloc_tcache_stack(tsd_tsdn(tsd), tcache_slow->dyn_alloc);
|
||||
@ -1274,15 +1180,8 @@ bool
|
||||
tcache_boot(tsdn_t *tsdn, base_t *base) {
|
||||
global_do_not_change_tcache_maxclass = sz_s2u(opt_tcache_max);
|
||||
assert(global_do_not_change_tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
|
||||
global_do_not_change_tcache_nbins =
|
||||
global_do_not_change_nbins =
|
||||
sz_size2index(global_do_not_change_tcache_maxclass) + 1;
|
||||
/*
|
||||
* Pre-compute default bin info and store the results in
|
||||
* opt_tcache_ncached_max. After the changes here,
|
||||
* opt_tcache_ncached_max should not be modified and should always be
|
||||
* accessed using tcache_get_default_ncached_max.
|
||||
*/
|
||||
tcache_bin_info_compute(opt_tcache_ncached_max);
|
||||
|
||||
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
|
||||
malloc_mutex_rank_exclusive)) {
|
||||
|
src/util.c (49 changes)
@ -1,49 +0,0 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/util.h"

/* Reads the next size pair in a multi-sized option. */
bool
multi_setting_parse_next(const char **setting_segment_cur, size_t *len_left,
size_t *key_start, size_t *key_end, size_t *value) {
const char *cur = *setting_segment_cur;
char *end;
uintmax_t um;

set_errno(0);

/* First number, then '-' */
um = malloc_strtoumax(cur, &end, 0);
if (get_errno() != 0 || *end != '-') {
return true;
}
*key_start = (size_t)um;
cur = end + 1;

/* Second number, then ':' */
um = malloc_strtoumax(cur, &end, 0);
if (get_errno() != 0 || *end != ':') {
return true;
}
*key_end = (size_t)um;
cur = end + 1;

/* Last number */
um = malloc_strtoumax(cur, &end, 0);
if (get_errno() != 0) {
return true;
}
*value = (size_t)um;

/* Consume the separator if there is one. */
if (*end == '|') {
end++;
}

*len_left -= end - *setting_segment_cur;
*setting_segment_cur = end;

return false;
}
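multi_setting_parse_next() above consumes one "start-end:value" segment per call and leaves the cursor past an optional '|' separator, which is why its callers loop while len_left > 0. A hedged sketch of that calling convention; the option string and variable names are illustrative only:

const char *cur = "8-128:1|160-160:11";	/* illustrative option string */
size_t len_left = strlen(cur);
size_t size_start, size_end, ncached_max;
while (len_left > 0) {
	if (multi_setting_parse_next(&cur, &len_left, &size_start,
	    &size_end, &ncached_max)) {
		break;	/* Malformed segment; callers report this as an error. */
	}
	/* size_start..size_end is a size range, ncached_max its setting. */
}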
@ -1,18 +1,19 @@
#include "test/jemalloc_test.h"

static void
do_fill_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t ncached_max,
cache_bin_sz_t nfill_attempt, cache_bin_sz_t nfill_succeed) {
do_fill_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t ncached_max, cache_bin_sz_t nfill_attempt,
cache_bin_sz_t nfill_succeed) {
bool success;
void *ptr;
assert_true(cache_bin_ncached_get_local(bin) == 0, "");
assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill_attempt);
cache_bin_init_ptr_array_for_fill(bin, &arr, nfill_attempt);
cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill_attempt);
for (cache_bin_sz_t i = 0; i < nfill_succeed; i++) {
arr.ptr[i] = &ptrs[i];
}
cache_bin_finish_fill(bin, &arr, nfill_succeed);
expect_true(cache_bin_ncached_get_local(bin) == nfill_succeed,
cache_bin_finish_fill(bin, info, &arr, nfill_succeed);
expect_true(cache_bin_ncached_get_local(bin, info) == nfill_succeed,
"");
cache_bin_low_water_set(bin);

@ -21,18 +22,18 @@ do_fill_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t ncached_max,
expect_true(success, "");
expect_ptr_eq(ptr, (void *)&ptrs[i],
"Should pop in order filled");
expect_true(cache_bin_low_water_get(bin)
expect_true(cache_bin_low_water_get(bin, info)
== nfill_succeed - i - 1, "");
}
expect_true(cache_bin_ncached_get_local(bin) == 0, "");
expect_true(cache_bin_low_water_get(bin) == 0, "");
expect_true(cache_bin_ncached_get_local(bin, info) == 0, "");
expect_true(cache_bin_low_water_get(bin, info) == 0, "");
}
static void
do_flush_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
cache_bin_sz_t nflush) {
do_flush_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, cache_bin_sz_t nflush) {
bool success;
assert_true(cache_bin_ncached_get_local(bin) == 0, "");
assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");

for (cache_bin_sz_t i = 0; i < nfill; i++) {
success = cache_bin_dalloc_easy(bin, &ptrs[i]);
@ -40,30 +41,30 @@ do_flush_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
}

CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
cache_bin_init_ptr_array_for_flush(bin, &arr, nflush);
cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
for (cache_bin_sz_t i = 0; i < nflush; i++) {
expect_ptr_eq(arr.ptr[i], &ptrs[nflush - i - 1], "");
}
cache_bin_finish_flush(bin, &arr, nflush);
cache_bin_finish_flush(bin, info, &arr, nflush);

expect_true(cache_bin_ncached_get_local(bin) == nfill - nflush,
expect_true(cache_bin_ncached_get_local(bin, info) == nfill - nflush,
"");
while (cache_bin_ncached_get_local(bin) > 0) {
while (cache_bin_ncached_get_local(bin, info) > 0) {
cache_bin_alloc(bin, &success);
}
}

static void
do_batch_alloc_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
size_t batch) {
assert_true(cache_bin_ncached_get_local(bin) == 0, "");
do_batch_alloc_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, size_t batch) {
assert_true(cache_bin_ncached_get_local(bin, info) == 0, "");
CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
cache_bin_init_ptr_array_for_fill(bin, &arr, nfill);
cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
for (cache_bin_sz_t i = 0; i < nfill; i++) {
arr.ptr[i] = &ptrs[i];
}
cache_bin_finish_fill(bin, &arr, nfill);
assert_true(cache_bin_ncached_get_local(bin) == nfill, "");
cache_bin_finish_fill(bin, info, &arr, nfill);
assert_true(cache_bin_ncached_get_local(bin, info) == nfill, "");
cache_bin_low_water_set(bin);

void **out = malloc((batch + 1) * sizeof(void *));
@ -72,9 +73,9 @@ do_batch_alloc_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
for (cache_bin_sz_t i = 0; i < (cache_bin_sz_t)n; i++) {
expect_ptr_eq(out[i], &ptrs[i], "");
}
expect_true(cache_bin_low_water_get(bin) == nfill -
expect_true(cache_bin_low_water_get(bin, info) == nfill -
(cache_bin_sz_t)n, "");
while (cache_bin_ncached_get_local(bin) > 0) {
while (cache_bin_ncached_get_local(bin, info) > 0) {
bool success;
cache_bin_alloc(bin, &success);
}
@ -105,11 +106,13 @@ TEST_BEGIN(test_cache_bin) {
cache_bin_info_init(&info, ncached_max);
cache_bin_t bin;
test_bin_init(&bin, &info);
cache_bin_info_t *bin_info = &bin.bin_info;

/* Initialize to empty; should then have 0 elements. */
expect_d_eq(ncached_max, cache_bin_ncached_max_get(&bin), "");
expect_true(cache_bin_ncached_get_local(&bin) == 0, "");
expect_true(cache_bin_low_water_get(&bin) == 0, "");
expect_d_eq(ncached_max, cache_bin_info_ncached_max_get(&bin,
&bin.bin_info), "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == 0, "");
expect_true(cache_bin_low_water_get(&bin, bin_info) == 0, "");

ptr = cache_bin_alloc_easy(&bin, &success);
expect_false(success, "Shouldn't successfully allocate when empty");
@ -126,14 +129,14 @@ TEST_BEGIN(test_cache_bin) {
void **ptrs = mallocx(sizeof(void *) * (ncached_max + 1), 0);
assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_ncached_get_local(&bin) == i, "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == i, "");
success = cache_bin_dalloc_easy(&bin, &ptrs[i]);
expect_true(success,
"Should be able to dalloc into a non-full cache bin.");
expect_true(cache_bin_low_water_get(&bin) == 0,
expect_true(cache_bin_low_water_get(&bin, bin_info) == 0,
"Pushes and pops shouldn't change low water of zero.");
}
expect_true(cache_bin_ncached_get_local(&bin) == ncached_max,
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == ncached_max,
"");
success = cache_bin_dalloc_easy(&bin, &ptrs[ncached_max]);
expect_false(success, "Shouldn't be able to dalloc into a full bin.");
@ -141,9 +144,9 @@ TEST_BEGIN(test_cache_bin) {
cache_bin_low_water_set(&bin);

for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_low_water_get(&bin)
expect_true(cache_bin_low_water_get(&bin, bin_info)
== ncached_max - i, "");
expect_true(cache_bin_ncached_get_local(&bin)
expect_true(cache_bin_ncached_get_local(&bin, bin_info)
== ncached_max - i, "");
/*
* This should fail -- the easy variant can't change the low
@ -152,9 +155,9 @@ TEST_BEGIN(test_cache_bin) {
ptr = cache_bin_alloc_easy(&bin, &success);
expect_ptr_null(ptr, "");
expect_false(success, "");
expect_true(cache_bin_low_water_get(&bin)
expect_true(cache_bin_low_water_get(&bin, bin_info)
== ncached_max - i, "");
expect_true(cache_bin_ncached_get_local(&bin)
expect_true(cache_bin_ncached_get_local(&bin, bin_info)
== ncached_max - i, "");

/* This should succeed, though. */
@ -162,13 +165,13 @@ TEST_BEGIN(test_cache_bin) {
expect_true(success, "");
expect_ptr_eq(ptr, &ptrs[ncached_max - i - 1],
"Alloc should pop in stack order");
expect_true(cache_bin_low_water_get(&bin)
expect_true(cache_bin_low_water_get(&bin, bin_info)
== ncached_max - i - 1, "");
expect_true(cache_bin_ncached_get_local(&bin)
expect_true(cache_bin_ncached_get_local(&bin, bin_info)
== ncached_max - i - 1, "");
}
/* Now we're empty -- all alloc attempts should fail. */
expect_true(cache_bin_ncached_get_local(&bin) == 0, "");
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == 0, "");
ptr = cache_bin_alloc_easy(&bin, &success);
expect_ptr_null(ptr, "");
expect_false(success, "");
@ -184,7 +187,7 @@ TEST_BEGIN(test_cache_bin) {
for (cache_bin_sz_t i = ncached_max / 2; i < ncached_max; i++) {
cache_bin_dalloc_easy(&bin, &ptrs[i]);
}
expect_true(cache_bin_ncached_get_local(&bin) == ncached_max,
expect_true(cache_bin_ncached_get_local(&bin, bin_info) == ncached_max,
"");
for (cache_bin_sz_t i = ncached_max - 1; i >= ncached_max / 2; i--) {
/*
@ -201,72 +204,77 @@ TEST_BEGIN(test_cache_bin) {
expect_ptr_null(ptr, "");

/* We're going to test filling -- we must be empty to start. */
while (cache_bin_ncached_get_local(&bin)) {
while (cache_bin_ncached_get_local(&bin, bin_info)) {
cache_bin_alloc(&bin, &success);
expect_true(success, "");
}

/* Test fill. */
/* Try to fill all, succeed fully. */
do_fill_test(&bin, ptrs, ncached_max, ncached_max,
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max,
ncached_max);
/* Try to fill all, succeed partially. */
do_fill_test(&bin, ptrs, ncached_max, ncached_max,
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max,
ncached_max / 2);
/* Try to fill all, fail completely. */
do_fill_test(&bin, ptrs, ncached_max, ncached_max, 0);
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max, 0);

/* Try to fill some, succeed fully. */
do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2,
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2,
ncached_max / 2);
/* Try to fill some, succeed partially. */
do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2,
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2,
ncached_max / 4);
/* Try to fill some, fail completely. */
do_fill_test(&bin, ptrs, ncached_max, ncached_max / 2, 0);
do_fill_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2, 0);

do_flush_test(&bin, ptrs, ncached_max, ncached_max);
do_flush_test(&bin, ptrs, ncached_max, ncached_max / 2);
do_flush_test(&bin, ptrs, ncached_max, 0);
do_flush_test(&bin, ptrs, ncached_max / 2, ncached_max / 2);
do_flush_test(&bin, ptrs, ncached_max / 2, ncached_max / 4);
do_flush_test(&bin, ptrs, ncached_max / 2, 0);
do_flush_test(&bin, bin_info, ptrs, ncached_max, ncached_max);
do_flush_test(&bin, bin_info, ptrs, ncached_max, ncached_max / 2);
do_flush_test(&bin, bin_info, ptrs, ncached_max, 0);
do_flush_test(&bin, bin_info, ptrs, ncached_max / 2, ncached_max / 2);
do_flush_test(&bin, bin_info, ptrs, ncached_max / 2, ncached_max / 4);
do_flush_test(&bin, bin_info, ptrs, ncached_max / 2, 0);

do_batch_alloc_test(&bin, ptrs, ncached_max, ncached_max);
do_batch_alloc_test(&bin, ptrs, ncached_max, ncached_max * 2);
do_batch_alloc_test(&bin, ptrs, ncached_max, ncached_max / 2);
do_batch_alloc_test(&bin, ptrs, ncached_max, 2);
do_batch_alloc_test(&bin, ptrs, ncached_max, 1);
do_batch_alloc_test(&bin, ptrs, ncached_max, 0);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, ncached_max / 2);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, ncached_max);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, ncached_max / 4);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, 2);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, 1);
do_batch_alloc_test(&bin, ptrs, ncached_max / 2, 0);
do_batch_alloc_test(&bin, ptrs, 2, ncached_max);
do_batch_alloc_test(&bin, ptrs, 2, 2);
do_batch_alloc_test(&bin, ptrs, 2, 1);
do_batch_alloc_test(&bin, ptrs, 2, 0);
do_batch_alloc_test(&bin, ptrs, 1, 2);
do_batch_alloc_test(&bin, ptrs, 1, 1);
do_batch_alloc_test(&bin, ptrs, 1, 0);
do_batch_alloc_test(&bin, ptrs, 0, 2);
do_batch_alloc_test(&bin, ptrs, 0, 1);
do_batch_alloc_test(&bin, ptrs, 0, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, ncached_max);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max,
ncached_max * 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max,
ncached_max / 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max / 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max / 4);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, ncached_max / 2, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, ncached_max);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, 2, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, 1, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, 1, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, 1, 0);
do_batch_alloc_test(&bin, bin_info, ptrs, 0, 2);
do_batch_alloc_test(&bin, bin_info, ptrs, 0, 1);
do_batch_alloc_test(&bin, bin_info, ptrs, 0, 0);

free(ptrs);
}
TEST_END
static void
do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
cache_bin_sz_t nstash) {
expect_true(cache_bin_ncached_get_local(bin) == 0,
do_flush_stashed_test(cache_bin_t *bin, cache_bin_info_t *info, void **ptrs,
cache_bin_sz_t nfill, cache_bin_sz_t nstash) {
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Bin not empty");
expect_true(cache_bin_nstashed_get_local(bin) == 0,
expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
"Bin not empty");
expect_true(nfill + nstash <= bin->bin_info.ncached_max, "Exceeded max");
expect_true(nfill + nstash <= info->ncached_max, "Exceeded max");

bool ret;
/* Fill */
@ -274,7 +282,7 @@ do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
ret = cache_bin_dalloc_easy(bin, &ptrs[i]);
expect_true(ret, "Unexpected fill failure");
}
expect_true(cache_bin_ncached_get_local(bin) == nfill,
expect_true(cache_bin_ncached_get_local(bin, info) == nfill,
"Wrong cached count");

/* Stash */
@ -282,10 +290,10 @@ do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
ret = cache_bin_stash(bin, &ptrs[i + nfill]);
expect_true(ret, "Unexpected stash failure");
}
expect_true(cache_bin_nstashed_get_local(bin) == nstash,
expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
"Wrong stashed count");

if (nfill + nstash == bin->bin_info.ncached_max) {
if (nfill + nstash == info->ncached_max) {
ret = cache_bin_dalloc_easy(bin, &ptrs[0]);
expect_false(ret, "Should not dalloc into a full bin");
ret = cache_bin_stash(bin, &ptrs[0]);
@ -300,19 +308,19 @@ do_flush_stashed_test(cache_bin_t *bin, void **ptrs, cache_bin_sz_t nfill,
expect_true((uintptr_t)ptr < (uintptr_t)&ptrs[nfill],
"Should not alloc stashed ptrs");
}
expect_true(cache_bin_ncached_get_local(bin) == 0,
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get_local(bin) == nstash,
expect_true(cache_bin_nstashed_get_local(bin, info) == nstash,
"Wrong stashed count");

cache_bin_alloc(bin, &ret);
expect_false(ret, "Should not alloc stashed");

/* Clear stashed ones */
cache_bin_finish_flush_stashed(bin);
expect_true(cache_bin_ncached_get_local(bin) == 0,
cache_bin_finish_flush_stashed(bin, info);
expect_true(cache_bin_ncached_get_local(bin, info) == 0,
"Wrong cached count");
expect_true(cache_bin_nstashed_get_local(bin) == 0,
expect_true(cache_bin_nstashed_get_local(bin, info) == 0,
"Wrong stashed count");

cache_bin_alloc(bin, &ret);
@ -326,6 +334,7 @@ TEST_BEGIN(test_cache_bin_stash) {
cache_bin_info_t info;
cache_bin_info_init(&info, ncached_max);
test_bin_init(&bin, &info);
cache_bin_info_t *bin_info = &bin.bin_info;

/*
* The content of this array is not accessed; instead the interior
@ -335,9 +344,9 @@ TEST_BEGIN(test_cache_bin_stash) {
assert_ptr_not_null(ptrs, "Unexpected mallocx failure");
bool ret;
for (cache_bin_sz_t i = 0; i < ncached_max; i++) {
expect_true(cache_bin_ncached_get_local(&bin) ==
expect_true(cache_bin_ncached_get_local(&bin, bin_info) ==
(i / 2 + i % 2), "Wrong ncached value");
expect_true(cache_bin_nstashed_get_local(&bin) ==
expect_true(cache_bin_nstashed_get_local(&bin, bin_info) ==
i / 2, "Wrong nstashed value");
if (i % 2 == 0) {
cache_bin_dalloc_easy(&bin, &ptrs[i]);
@ -360,21 +369,22 @@ TEST_BEGIN(test_cache_bin_stash) {
expect_true(diff % 2 == 0, "Should be able to alloc");
} else {
expect_false(ret, "Should not alloc stashed");
expect_true(cache_bin_nstashed_get_local(&bin) == ncached_max / 2,
expect_true(cache_bin_nstashed_get_local(&bin,
bin_info) == ncached_max / 2,
"Wrong nstashed value");
}
}

test_bin_init(&bin, &info);
do_flush_stashed_test(&bin, ptrs, ncached_max, 0);
do_flush_stashed_test(&bin, ptrs, 0, ncached_max);
do_flush_stashed_test(&bin, ptrs, ncached_max / 2,
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max, 0);
do_flush_stashed_test(&bin, bin_info, ptrs, 0, ncached_max);
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max / 2);
do_flush_stashed_test(&bin, ptrs, ncached_max / 4,
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 4,
ncached_max / 2);
do_flush_stashed_test(&bin, ptrs, ncached_max / 2,
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 2,
ncached_max / 4);
do_flush_stashed_test(&bin, ptrs, ncached_max / 4,
do_flush_stashed_test(&bin, bin_info, ptrs, ncached_max / 4,
ncached_max / 4);
}
TEST_END
@ -1,268 +0,0 @@
#include "test/jemalloc_test.h"
#include "test/san.h"

const char *malloc_conf =
"tcache_ncached_max:256-1024:1001|2048-2048:0|8192-8192:1,tcache_max:4096";
extern void tcache_bin_info_compute(
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]);
extern bool tcache_get_default_ncached_max_set(szind_t ind);
extern const cache_bin_info_t *tcache_get_default_ncached_max(void);
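An illustrative reading of the malloc_conf string above, assuming its segments map onto size classes the way the expectations further down check (1001, 0, and 1 respectively):

/*
 * tcache_ncached_max:
 *   256-1024:1001 -> bins for sizes 256..1024 may cache up to 1001 objects
 *   2048-2048:0   -> the 2048-byte bin caches nothing
 *   8192-8192:1   -> the 8192-byte bin caches at most one object
 * tcache_max:4096 -> caps the sizes served by the tcache at 4096 by default
 */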
static void
check_bins_info(cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
size_t mib_get[4], mib_get_len;
mib_get_len = sizeof(mib_get) / sizeof(size_t);
const char *get_name = "thread.tcache.ncached_max.read_sizeclass";
size_t ncached_max;
size_t sz = sizeof(size_t);
expect_d_eq(mallctlnametomib(get_name, mib_get, &mib_get_len), 0,
"Unexpected mallctlnametomib() failure");

for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
size_t bin_size = sz_index2size(i);
expect_d_eq(mallctlbymib(mib_get, mib_get_len,
(void *)&ncached_max, &sz,
(void *)&bin_size, sizeof(size_t)), 0,
"Unexpected mallctlbymib() failure");
expect_zu_eq(ncached_max, tcache_bin_info[i].ncached_max,
"Unexpected ncached_max for bin %d", i);
/* Check ncached_max returned under a non-bin size. */
bin_size--;
size_t temp_ncached_max = 0;
expect_d_eq(mallctlbymib(mib_get, mib_get_len,
(void *)&temp_ncached_max, &sz,
(void *)&bin_size, sizeof(size_t)), 0,
"Unexpected mallctlbymib() failure");
expect_zu_eq(temp_ncached_max, ncached_max,
"Unexpected ncached_max for inaccurate bin size.");
}
}
static void *
ncached_max_check(void* args) {
cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX];
cache_bin_info_t tcache_bin_info_backup[TCACHE_NBINS_MAX];
tsd_t *tsd = tsd_fetch();
tcache_t *tcache = tsd_tcachep_get(tsd);
assert(tcache != NULL);
tcache_slow_t *tcache_slow = tcache->tcache_slow;

tcache_bin_info_compute(tcache_bin_info);
memcpy(tcache_bin_info_backup, tcache_bin_info,
sizeof(tcache_bin_info));
/* Check ncached_max set by malloc_conf. */
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
bool first_range = (i >= sz_size2index(256) &&
i <= sz_size2index(1024));
bool second_range = (i == sz_size2index(2048));
bool third_range = (i == sz_size2index(8192));
cache_bin_sz_t target_ncached_max = 0;
if (first_range || second_range || third_range) {
target_ncached_max = first_range ? 1001:
(second_range ? 0: 1);
expect_true(tcache_get_default_ncached_max_set(i),
"Unexpected state for bin %u", i);
expect_zu_eq(target_ncached_max,
tcache_bin_info[i].ncached_max,
"Unexpected generated ncached_max for bin %u", i);
expect_zu_eq(target_ncached_max,
tcache_get_default_ncached_max()[i].ncached_max,
"Unexpected pre-set ncached_max for bin %u", i);
} else {
expect_false(tcache_get_default_ncached_max_set(i),
"Unexpected state for bin %u", i);
}
}
unsigned nbins = tcache_nbins_get(tcache_slow);
for (szind_t i = nbins; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
/* Check the initial bin settings. */
check_bins_info(tcache_bin_info);

size_t mib_set[4], mib_set_len;
mib_set_len = sizeof(mib_set) / sizeof(size_t);
const char *set_name = "thread.tcache.ncached_max.write";
expect_d_eq(mallctlnametomib(set_name, mib_set, &mib_set_len), 0,
"Unexpected mallctlnametomib() failure");

/* Test the ncached_max set with tcache on. */
char inputs[100] = "8-128:1|160-160:11|170-320:22|224-8388609:0";
char *inputp = inputs;
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
if (i >= sz_size2index(8) && i <= sz_size2index(128)) {
cache_bin_info_init(&tcache_bin_info[i], 1);
}
if (i == sz_size2index(160)) {
cache_bin_info_init(&tcache_bin_info[i], 11);
}
if (i >= sz_size2index(170) && i <= sz_size2index(320)) {
cache_bin_info_init(&tcache_bin_info[i], 22);
}
if (i >= sz_size2index(224)) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
if (i >= nbins) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
}
check_bins_info(tcache_bin_info);

/*
* Close the tcache and set ncached_max of some bins. It will be
* set properly but thread.tcache.ncached_max.read still returns 0
* since the bin is not available yet. After enabling the tcache,
* the new setting will not be carried on. Instead, the default
* settings will be applied.
*/
bool e0 = false, e1;
size_t bool_sz = sizeof(bool);
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
(void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
expect_true(e1, "Unexpected previous tcache state");
strcpy(inputs, "0-112:8");
/* Setting returns ENOENT when the tcache is disabled. */
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), ENOENT,
"Unexpected mallctlbymib() failure");
/* All ncached_max should return 0 once tcache is disabled. */
for (szind_t i = 0; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
check_bins_info(tcache_bin_info);

e0 = true;
expect_d_eq(mallctl("thread.tcache.enabled", (void *)&e1, &bool_sz,
(void *)&e0, bool_sz), 0, "Unexpected mallctl() error");
expect_false(e1, "Unexpected previous tcache state");
memcpy(tcache_bin_info, tcache_bin_info_backup,
sizeof(tcache_bin_info_backup));
for (szind_t i = tcache_nbins_get(tcache_slow); i < TCACHE_NBINS_MAX;
i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
check_bins_info(tcache_bin_info);
/*
* Set ncached_max of bins not enabled yet. Then, enable them by
* resetting tcache_max. The ncached_max changes should stay.
*/
size_t tcache_max = 1024;
assert_d_eq(mallctl("thread.tcache.max",
NULL, NULL, (void *)&tcache_max, sizeof(size_t)), 0,
"Unexpected mallctl() failure");
for (szind_t i = sz_size2index(1024) + 1; i < TCACHE_NBINS_MAX; i++) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
strcpy(inputs, "2048-6144:123");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);

tcache_max = 6144;
assert_d_eq(mallctl("thread.tcache.max",
NULL, NULL, (void *)&tcache_max, sizeof(size_t)), 0,
"Unexpected mallctl() failure");
memcpy(tcache_bin_info, tcache_bin_info_backup,
sizeof(tcache_bin_info_backup));
for (szind_t i = sz_size2index(2048); i < TCACHE_NBINS_MAX; i++) {
if (i <= sz_size2index(6144)) {
cache_bin_info_init(&tcache_bin_info[i], 123);
} else if (i > sz_size2index(6144)) {
cache_bin_info_init(&tcache_bin_info[i], 0);
}
}
check_bins_info(tcache_bin_info);

/* Test an empty input, it should do nothing. */
strcpy(inputs, "");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);

/* Test a half-done string, it should return EINVAL and do nothing. */
strcpy(inputs, "4-1024:7|256-1024");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), EINVAL,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);

/*
* Test an invalid string with start size larger than end size. It
* should return success but do nothing.
*/
strcpy(inputs, "1024-256:7");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);

/*
* Test a string exceeding the length limit, it should return EINVAL
* and do nothing.
*/
char *long_inputs = (char *)malloc(10000 * sizeof(char));
expect_true(long_inputs != NULL, "Unexpected allocation failure.");
for (int i = 0; i < 200; i++) {
memcpy(long_inputs + i * 9, "4-1024:3|", 9);
}
memcpy(long_inputs + 200 * 9, "4-1024:3", 8);
long_inputs[200 * 9 + 8] = '\0';
inputp = long_inputs;
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), EINVAL,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);
free(long_inputs);

/*
* Test a string with invalid characters, it should return EINVAL
* and do nothing.
*/
strcpy(inputs, "k8-1024:77p");
inputp = inputs;
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), EINVAL,
"Unexpected mallctlbymib() failure");
check_bins_info(tcache_bin_info);

/* Test large ncached_max, it should return success but capped. */
strcpy(inputs, "1024-1024:65540");
expect_d_eq(mallctlbymib(mib_set, mib_set_len, NULL, NULL,
(void *)&inputp, sizeof(char *)), 0,
"Unexpected mallctlbymib() failure");
cache_bin_info_init(&tcache_bin_info[sz_size2index(1024)],
CACHE_BIN_NCACHED_MAX);
check_bins_info(tcache_bin_info);

return NULL;
}
TEST_BEGIN(test_ncached_max) {
test_skip_if(!config_stats);
test_skip_if(!opt_tcache);
test_skip_if(san_uaf_detection_enabled());
/* TODO: change nthreads to 8 to reduce CI loads. */
unsigned nthreads = 108;
VARIABLE_ARRAY(thd_t, threads, nthreads);
for (unsigned i = 0; i < nthreads; i++) {
thd_create(&threads[i], ncached_max_check, NULL);
}
for (unsigned i = 0; i < nthreads; i++) {
thd_join(threads[i], NULL);
}
}
TEST_END

int
main(void) {
return test(
test_ncached_max);
}
@ -81,7 +81,8 @@ tcache_bytes_read_local(void) {
if (tcache_bin_disabled(i, cache_bin, tcache->tcache_slow)) {
continue;
}
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin);
cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
&cache_bin->bin_info);
tcache_bytes += ncached * sz_index2size(i);
}
return tcache_bytes;
@ -259,7 +260,7 @@ tcache_check(void *arg) {
expect_zu_eq(old_tcache_max, opt_tcache_max,
"Unexpected default value for tcache_max");
tcache_nbins = tcache_nbins_get(tcache_slow);
expect_zu_eq(tcache_nbins, (size_t)global_do_not_change_tcache_nbins,
expect_zu_eq(tcache_nbins, (size_t)global_do_not_change_nbins,
"Unexpected default value for tcache_nbins");
validate_tcache_stack(tcache);

@ -369,3 +370,4 @@ main(void) {
test_tcache_max,
test_thread_tcache_max);
}