#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H
#define JEMALLOC_INTERNAL_TCACHE_INLINES_H

#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/util.h"

#ifndef JEMALLOC_ENABLE_INLINE
void tcache_event(tsd_t *tsd, tcache_t *tcache);
void tcache_flush(void);
bool tcache_enabled_get(tsd_t *tsd);
tcache_t *tcache_get(tsd_t *tsd);
void tcache_enabled_set(tsd_t *tsd, bool enabled);
void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success);
void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    size_t size, szind_t ind, bool zero, bool slow_path);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, bool slow_path);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
    szind_t binind, bool slow_path);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_))
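/* Return whether the tcache is currently enabled for the calling thread. */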
JEMALLOC_INLINE bool
tcache_enabled_get(tsd_t *tsd) {
	cassert(config_tcache);

	return tsd_tcache_enabled_get(tsd);
}

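/*
 * Enable or disable the tcache for the calling thread, initializing or
 * tearing down its cached data as needed before recording the new state.
 */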
JEMALLOC_INLINE void
tcache_enabled_set(tsd_t *tsd, bool enabled) {
	cassert(config_tcache);

	bool was_enabled = tsd_tcache_enabled_get(tsd);

	if (!was_enabled && enabled) {
		tsd_tcache_data_init(tsd);
	} else if (was_enabled && !enabled) {
		tcache_cleanup(tsd);
	}
	/* Commit the state last.  Above calls check current state. */
	tsd_tcache_enabled_set(tsd, enabled);
	tsd_slow_update(tsd);
}

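/*
 * Periodic hook on the tcache fast paths: when the per-tcache GC ticker
 * fires, incremental GC runs via tcache_event_hard().  A TCACHE_GC_INCR of
 * zero disables the mechanism entirely.
 */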
JEMALLOC_ALWAYS_INLINE void
tcache_event(tsd_t *tsd, tcache_t *tcache) {
	if (TCACHE_GC_INCR == 0) {
		return;
	}

	if (unlikely(ticker_tick(&tcache->gc_ticker))) {
		tcache_event_hard(tsd, tcache);
	}
}

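/*
 * Pop the most recently cached object off tbin's avail stack.  On an empty
 * bin, record that it ran dry (low_water = -1), set *tcache_success to
 * false, and return NULL.
 */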
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) {
	void *ret;

	if (unlikely(tbin->ncached == 0)) {
		tbin->low_water = -1;
		*tcache_success = false;
		return NULL;
	}
	/*
	 * tcache_success (instead of ret) should be checked upon the return of
	 * this function.  We avoid checking (ret == NULL) because there is
	 * never a null stored on the avail stack (which is unknown to the
	 * compiler), and eagerly checking ret would cause pipeline stall
	 * (waiting for the cacheline).
	 */
	*tcache_success = true;
	ret = *(tbin->avail - tbin->ncached);
	tbin->ncached--;

	if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) {
		tbin->low_water = tbin->ncached;
	}

	return ret;
}

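/*
 * Allocate a small object through the tcache: serve it from the cached bin
 * when possible; on a miss, fall back to tcache_alloc_small_hard() to refill
 * the bin from the chosen arena.
 */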
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	assert(binind < NBINS);
	tbin = tcache_small_bin_get(tcache, binind);
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		bool tcache_hard_success;
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
		    tbin, binind, &tcache_hard_success);
		if (!tcache_hard_success) {
			return NULL;
		}
	}

	assert(ret);
	/*
	 * Only compute usize if required.  The checks in the following if
	 * statement are all static.
	 */
	if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
		usize = index2size(binind);
		assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
	}

	if (likely(!zero)) {
		if (slow_path && config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	if (config_stats) {
		tbin->tstats.nrequests++;
	}
	if (config_prof) {
		tcache->prof_accumbytes += usize;
	}
	tcache_event(tsd, tcache);
	return ret;
}

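/*
 * Allocate a large object through the tcache.  On a cache miss the object is
 * allocated directly from the arena via large_malloc() rather than by
 * refilling the bin, since speculatively creating large objects is expensive
 * (see the comment inside).
 */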
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
    szind_t binind, bool zero, bool slow_path) {
	void *ret;
	tcache_bin_t *tbin;
	bool tcache_success;

	assert(binind >= NBINS && binind < nhbins);
	tbin = tcache_large_bin_get(tcache, binind);
	ret = tcache_alloc_easy(tbin, &tcache_success);
	assert(tcache_success == (ret != NULL));
	if (unlikely(!tcache_success)) {
		/*
		 * Only allocate one large object at a time, because it's quite
		 * expensive to create one and not use it.
		 */
		arena = arena_choose(tsd, arena);
		if (unlikely(arena == NULL)) {
			return NULL;
		}

		ret = large_malloc(tsd_tsdn(tsd), arena, s2u(size), zero);
		if (ret == NULL) {
			return NULL;
		}
	} else {
		size_t usize JEMALLOC_CC_SILENCE_INIT(0);

		/* Only compute usize on demand. */
		if (config_prof || (slow_path && config_fill) ||
		    unlikely(zero)) {
			usize = index2size(binind);
			assert(usize <= tcache_maxclass);
		}

		if (likely(!zero)) {
			if (slow_path && config_fill) {
				if (unlikely(opt_junk_alloc)) {
					memset(ret, JEMALLOC_ALLOC_JUNK,
					    usize);
				} else if (unlikely(opt_zero)) {
					memset(ret, 0, usize);
				}
			}
		} else {
			memset(ret, 0, usize);
		}

		if (config_stats) {
			tbin->tstats.nrequests++;
		}
		if (config_prof) {
			tcache->prof_accumbytes += usize;
		}
	}

	tcache_event(tsd, tcache);
	return ret;
}

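/*
 * Return a freed small object to its cache bin, first flushing half of the
 * bin back to the arena if the bin is full.
 */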
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
	}

	tbin = tcache_small_bin_get(tcache, binind);
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_small(tsd, tcache, tbin, binind,
		    (tbin_info->ncached_max >> 1));
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

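/*
 * Return a freed large object to its cache bin, first flushing half of the
 * bin back to the arena if the bin is full.
 */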
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
    bool slow_path) {
	tcache_bin_t *tbin;
	tcache_bin_info_t *tbin_info;

	assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
	assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);

	if (slow_path && config_fill && unlikely(opt_junk_free)) {
		large_dalloc_junk(ptr, index2size(binind));
	}

	tbin = tcache_large_bin_get(tcache, binind);
	tbin_info = &tcache_bin_info[binind];
	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
		tcache_bin_flush_large(tsd, tbin, binind,
		    (tbin_info->ncached_max >> 1), tcache);
	}
	assert(tbin->ncached < tbin_info->ncached_max);
	tbin->ncached++;
	*(tbin->avail - tbin->ncached) = ptr;

	tcache_event(tsd, tcache);
}

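/* Look up an explicitly created tcache by index, creating it lazily on first
 * use. */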
JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind) {
	tcaches_t *elm = &tcaches[ind];
	if (unlikely(elm->tcache == NULL)) {
		elm->tcache = tcache_create_explicit(tsd);
	}
	return elm->tcache;
}
#endif

#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */