server-skynet-source-3rd-je.../include/jemalloc/internal/tcache_inlines.h
David Goldblatt 79f1ee2fc0 Move junking out of arena/tcache code.
This is debug only and we keep it off the fast path.  Moving it here simplifies
the internal logic.

This never tries to junk regions that were shrunk via xallocx.  I think this
is fine for two reasons:
- The shrunk-with-xallocx case is rare.
- We don't always junk in that case before this diff anyway (it depends on the
  opt settings and extent hooks in effect).
2020-03-12 11:54:19 -07:00

#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/util.h"
static inline bool
tcache_enabled_get(tsd_t *tsd) {
	return tsd_tcache_enabled_get(tsd);
}
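
/*
 * Enables or disables the tcache for the calling thread.  A false->true
 * transition initializes the tcache data; a true->false transition tears it
 * down.  The new state is committed to tsd only afterwards, because those
 * calls inspect the current state.
 */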
static inline void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}
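
/*
 * Allocation fast path for small size classes.  Pops a cached region from
 * the thread's small bin for binind; on a miss, falls back to
 * tcache_alloc_small_hard() to refill the bin from the chosen arena.  usize
 * is computed only when profiling, slow-path fill, or zeroing needs it.
 */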
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < SC_NBINS);
	bin = tcache_small_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success, binind);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    bin, binind, &tcache_hard_success);
		if (tcache_hard_success == false) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = sz_index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (unlikely(zero)) {
		memset(ret, 0, usize);
	}
	if (config_stats) {
		bin->tstats.nrequests++;
	}
	return ret;
}
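
/*
 * Allocation fast path for cacheable large size classes.  On a miss the
 * object is allocated directly from the arena via large_malloc() instead of
 * refilling the bin; see the comment in the miss path below.
 */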
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	cache_bin_t *bin;
	bool tcache_success;

	assert(binind >= SC_NBINS && binind < nhbins);
	bin = tcache_large_bin_get(tcache, binind);
	ret = cache_bin_alloc_easy(bin, &tcache_success, binind);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = sz_index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (unlikely(zero)) {
			memset(ret, 0, usize);
		}
		if (config_stats) {
			bin->tstats.nrequests++;
		}
	}

	return ret;
}
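
/*
 * Deallocation fast path for small size classes.  Pushes ptr onto the
 * thread's small bin; if the bin is full, flushes it down to half of its
 * maximum capacity and retries the push, which must then succeed.
 */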
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
	    <= SC_SMALL_MAXCLASS);

	bin = tcache_small_bin_get(tcache, binind);
	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
		unsigned remain = cache_bin_ncached_max_get(binind) >> 1;
		tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
		bool ret = cache_bin_dalloc_easy(bin, ptr);
		assert(ret);
	}
}
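
/*
 * Deallocation fast path for cacheable large size classes; same strategy as
 * the small-object path, but using the large bins and flush routine.
 */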
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	cache_bin_t *bin;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr)
	    > SC_SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	bin = tcache_large_bin_get(tcache, binind);
	if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
		unsigned remain = cache_bin_ncached_max_get(binind) >> 1;
		tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
		bool ret = cache_bin_dalloc_easy(bin, ptr);
		assert(ret);
	}
}
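
/*
 * Looks up an explicitly created tcache (e.g. one obtained via the
 * "tcache.create" mallctl) by index.  Aborts on an invalid id, and lazily
 * recreates a tcache that was marked as needing reinitialization.
 */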
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		malloc_printf("<jemalloc>: invalid tcache id (%u).\n", ind);
		abort();
	} else if (unlikely(elm->tcache == TCACHES_ELM_NEED_REINIT)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */