/* jemalloc/internal/tcache_inlines.h */
#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/tcache_externs.h"
#include "jemalloc/internal/util.h"
/* Return whether the calling thread's tcache is currently enabled. */
static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}
/*
 * Return the number of bins currently configured for this tcache; the
 * result never exceeds the compile-time cap TCACHE_NBINS_MAX.
 */
static inline unsigned
tcache_nbins_get(tcache_slow_t *tcache_slow) {
	assert(tcache_slow != NULL);
	unsigned n = tcache_slow->tcache_nbins;
	assert(n <= TCACHE_NBINS_MAX);
	return n;
}
/*
 * Return the largest size class this tcache will cache, i.e. the size
 * backing its last configured bin.
 */
static inline size_t
tcache_max_get(tcache_slow_t *tcache_slow) {
	assert(tcache_slow != NULL);
	unsigned nbins = tcache_nbins_get(tcache_slow);
	size_t max = sz_index2size(nbins - 1);
	assert(max <= TCACHE_MAXCLASS_LIMIT);
	return max;
}
/* Configure the tcache's bin count from the desired maximum cached size. */
static inline void
tcache_max_set(tcache_slow_t *tcache_slow, size_t tcache_max) {
	assert(tcache_slow != NULL);
	assert(tcache_max <= TCACHE_MAXCLASS_LIMIT);
	/* nbins is one past the index of tcache_max's size class. */
	tcache_slow->tcache_nbins = sz_size2index(tcache_max) + 1;
}
/*
 * Snapshot each bin's ncached_max into tcache_bin_info, covering all
 * TCACHE_NBINS_MAX slots (not just the currently enabled bins).
 */
static inline void
tcache_bin_settings_backup(tcache_t *tcache,
    cache_bin_info_t tcache_bin_info[TCACHE_NBINS_MAX]) {
	for (unsigned i = 0; i < TCACHE_NBINS_MAX; i++) {
		cache_bin_t *bin = &tcache->bins[i];
		cache_bin_info_init(&tcache_bin_info[i],
		    cache_bin_ncached_max_get_unsafe(bin));
	}
}
/*
 * Return whether the bin at index ind is disabled, cross-checking the
 * cached disabled flag against the configuration it is derived from.
 */
JEMALLOC_ALWAYS_INLINE bool
tcache_bin_disabled(szind_t ind, cache_bin_t *bin,
    tcache_slow_t *tcache_slow) {
	assert(bin != NULL);
	assert(ind < TCACHE_NBINS_MAX);
	bool disabled = cache_bin_disabled(bin);

	/*
	 * If a bin's ind >= nbins or ncached_max == 0, it must be disabled.
	 * However, when ind < nbins, it could be either enabled
	 * (ncached_max > 0) or disabled (ncached_max == 0). Similarly, when
	 * ncached_max > 0, it could be either enabled (ind < nbins) or
	 * disabled (ind >= nbins). Thus, if a bin is disabled, it has either
	 * ind >= nbins or ncached_max == 0. If a bin is enabled, it has
	 * ind < nbins and ncached_max > 0.
	 */
	unsigned nbins = tcache_nbins_get(tcache_slow);
	cache_bin_sz_t ncached_max = cache_bin_ncached_max_get_unsafe(bin);
	/* Forward direction: out-of-range index implies disabled. */
	if (ind >= nbins) {
		assert(disabled);
	} else {
		assert(!disabled || ncached_max == 0);
	}
	/* Forward direction: zero capacity implies disabled. */
	if (ncached_max == 0) {
		assert(disabled);
	} else {
		assert(!disabled || ind >= nbins);
	}
	/* Reverse direction: disabled implies at least one of the causes. */
	if (disabled) {
		assert(ind >= nbins || ncached_max == 0);
	} else {
		assert(ind < nbins && ncached_max > 0);
	}

	return disabled;
}
JEMALLOC_ALWAYS_INLINE void *
2018-04-17 03:08:27 +08:00
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
Clean compilation -Wextra Before this commit jemalloc produced many warnings when compiled with -Wextra with both Clang and GCC. This commit fixes the issues raised by these warnings or suppresses them if they were spurious at least for the Clang and GCC versions covered by CI. This commit: * adds `JEMALLOC_DIAGNOSTIC` macros: `JEMALLOC_DIAGNOSTIC_{PUSH,POP}` are used to modify the stack of enabled diagnostics. The `JEMALLOC_DIAGNOSTIC_IGNORE_...` macros are used to ignore a concrete diagnostic. * adds `JEMALLOC_FALLTHROUGH` macro to explicitly state that falling through `case` labels in a `switch` statement is intended * Removes all UNUSED annotations on function parameters. The warning -Wunused-parameter is now disabled globally in `jemalloc_internal_macros.h` for all translation units that include that header. It is never re-enabled since that header cannot be included by users. * locally suppresses some -Wextra diagnostics: * `-Wmissing-field-initializer` is buggy in older Clang and GCC versions, where it does not understanding that, in C, `= {0}` is a common C idiom to initialize a struct to zero * `-Wtype-bounds` is suppressed in a particular situation where a generic macro, used in multiple different places, compares an unsigned integer for smaller than zero, which is always true. * `-Walloc-larger-than-size=` diagnostics warn when an allocation function is called with a size that is too large (out-of-range). These are suppressed in the parts of the tests where `jemalloc` explicitly does this to test that the allocation functions fail properly. * adds a new CI build bot that runs the log unit test on CI. Closes #1196 .
2018-05-03 17:40:53 +08:00
size_t size, szind_t binind, bool zero, bool slow_path) {
void *ret;
bool tcache_success;
assert(binind < SC_NBINS);
cache_bin_t *bin = &tcache->bins[binind];
ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL)) {
return NULL;
}
if (unlikely(tcache_bin_disabled(binind, bin,
tcache->tcache_slow))) {
/* stats and zero are handled directly by the arena. */
return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
binind, zero, /* slab */ true);
}
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ true);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
bin, binind, &tcache_hard_success);
if (tcache_hard_success == false) {
return NULL;
}
}
assert(ret);
if (unlikely(zero)) {
size_t usize = sz_index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
memset(ret, 0, usize);
}
if (config_stats) {
bin->tstats.nrequests++;
}
return ret;
}
JEMALLOC_ALWAYS_INLINE void *
2016-06-01 05:50:21 +08:00
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
bool tcache_success;
cache_bin_t *bin = &tcache->bins[binind];
assert(binind >= SC_NBINS &&
!tcache_bin_disabled(binind, bin, tcache->tcache_slow));
ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
2016-06-01 05:50:21 +08:00
* Only allocate one large object at a time, because it's quite
* expensive to create one and not use it.
*/
arena = arena_choose(tsd, arena);
if (unlikely(arena == NULL)) {
return NULL;
}
tcache_bin_flush_stashed(tsd, tcache, bin, binind,
/* is_small */ false);
ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
if (ret == NULL) {
return NULL;
}
} else {
if (unlikely(zero)) {
size_t usize = sz_index2size(binind);
assert(usize <= tcache_max_get(tcache->tcache_slow));
memset(ret, 0, usize);
}
if (config_stats) {
bin->tstats.nrequests++;
}
}
return ret;
}
/*
 * Return a small object to its tcache bin, stashing sanitizer-flagged
 * pointers and flushing part of the bin when it is full.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);

	cache_bin_t *bin = &tcache->bins[binind];
	/*
	 * No unlikely() on this branch: free_fastpath() has already handled
	 * the most common cases, so the paths reached here are not rare.
	 */
	if (cache_bin_nonfast_aligned(ptr)) {
		/* Junk unconditionally, even if bin is full. */
		san_junk_ptr(ptr, sz_index2size(binind));
		if (cache_bin_stash(bin, ptr)) {
			return;
		}
		assert(cache_bin_full(bin));
		/* Bin full; fall through into the flush path below. */
	}
	if (likely(cache_bin_dalloc_easy(bin, ptr))) {
		return;
	}
	if (unlikely(tcache_bin_disabled(binind, bin, tcache->tcache_slow))) {
		arena_dalloc_small(tsd_tsdn(tsd), ptr);
		return;
	}
	/* Bin full: flush a fraction of it, then the push must succeed. */
	cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(
	    bin, &bin->bin_info);
	unsigned remain = ncached_max >> opt_lg_tcache_flush_small_div;
	tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
	bool pushed = cache_bin_dalloc_easy(bin, ptr);
	assert(pushed);
}
/*
 * Return a large object to its tcache bin, flushing part of the bin
 * first when it is full.
 */
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin = &tcache->bins[binind];
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SC_SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <=
	    tcache_max_get(tcache->tcache_slow));
	assert(!tcache_bin_disabled(binind, bin, tcache->tcache_slow));

	if (likely(cache_bin_dalloc_easy(bin, ptr))) {
		return;
	}
	/* Bin full: flush a fraction of it, then the push must succeed. */
	unsigned remain = cache_bin_info_ncached_max_get(bin, &bin->bin_info)
	    >> opt_lg_tcache_flush_large_div;
	tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
	bool pushed = cache_bin_dalloc_easy(bin, ptr);
	assert(pushed);
}
/*
 * Look up an explicitly-managed tcache by id, aborting on an invalid id
 * and lazily recreating a tcache that was marked for reinitialization.
 */
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	tcache_t *tcache = elm->tcache;

	if (unlikely(tcache == NULL)) {
		malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
		abort();
	}
	if (unlikely(tcache == TCACHES_ELM_NEED_REINIT)) {
		tcache = tcache_create_explicit(tsd);
		elm->tcache = tcache;
	}
	return tcache;
}
#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */