Allocate tcache stack from base allocator
When using metadata_thp, allocate tcache bin stacks from base0, so that they are placed on huge pages along with other metadata instead of being mixed in with regular allocations.

To make this possible, modify the base allocator to support limited reuse: tcache stacks freed on thread termination are returned to base0 and made available for reuse, but no merging is attempted since they were bump-allocated out of base blocks. These reused base extents are managed through separately allocated base edata_t structures, which are cached in base->edata_avail once the extent is fully allocated.

One tricky part is that stats updates must be skipped for such reused extents: they were accounted for already, and base memory is never purged. This requires tracking the "is reused" state explicitly and bypassing the stats updates when allocating from them.
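
The mechanism is easiest to see in isolation. Below is a standalone C sketch of the limited-reuse idea (not jemalloc code; all names are hypothetical, and a single fixed stack size is assumed): freed stacks go onto a free list instead of being merged, and stats are updated only for fresh bump allocations, never for reused ones.

/*
 * Standalone sketch (not jemalloc code): a bump allocator whose freed
 * stacks are cached for reuse, with no splitting or merging.
 */
#include <stddef.h>

typedef struct reuse_node_s {
	struct reuse_node_s *next;
} reuse_node_t;

typedef struct bump_base_s {
	char *cur;		/* bump pointer into a preallocated block */
	char *end;
	reuse_node_t *avail;	/* freed stacks, cached for reuse */
	size_t stack_size;	/* must be >= sizeof(reuse_node_t) */
	size_t allocated;	/* stats: updated for fresh allocations only */
} bump_base_t;

void *
stack_alloc(bump_base_t *base) {
	if (base->avail != NULL) {
		/* Reused extent: skip stats; it was accounted for already. */
		reuse_node_t *n = base->avail;
		base->avail = n->next;
		return n;
	}
	if ((size_t)(base->end - base->cur) < base->stack_size) {
		return NULL;	/* jemalloc would map a new base block here */
	}
	void *ret = base->cur;
	base->cur += base->stack_size;
	base->allocated += base->stack_size;	/* fresh: update stats */
	return ret;
}

void
stack_dalloc(bump_base_t *base, void *stack) {
	/*
	 * No merging: the stack was bump-allocated out of a block, so just
	 * cache it. Stats stay untouched; base memory is never purged or
	 * unmapped.
	 */
	reuse_node_t *n = stack;
	n->next = base->avail;
	base->avail = n;
}

The real change keeps this shape but manages the cached extents with separately allocated edata_t structures, as the base.h hunk below shows.
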
@@ -73,6 +73,9 @@ struct base_s {
 	/* Heap of extents that track unused trailing space within blocks. */
 	edata_heap_t avail[SC_NSIZES];
 
+	/* Contains reusable base edata (used by tcache_stacks currently). */
+	edata_avail_t edata_avail;
+
 	/* Stats, only maintained if config_stats. */
 	size_t allocated;
 	size_t resident;

@@ -101,6 +104,8 @@ extent_hooks_t *base_extent_hooks_set(base_t *base,
     extent_hooks_t *extent_hooks);
 void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
 edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
+void *b0_alloc_tcache_stack(tsdn_t *tsdn, size_t size);
+void b0_dalloc_tcache_stack(tsdn_t *tsdn, void *tcache_stack);
 void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
     size_t *resident, size_t *mapped, size_t *n_thp);
 void base_prefork(tsdn_t *tsdn, base_t *base);

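A hedged sketch of how the two new entry points pair up over a tcache's lifetime; the real call sites live in tcache.c, and the surrounding logic below is simplified and assumed:

/* Hypothetical calling pattern; compiles against jemalloc internals. */
static void
tcache_stacks_example(tsdn_t *tsdn, size_t total_stack_bytes) {
	/*
	 * On tcache init: bin stacks come from base0, i.e. on huge pages
	 * alongside other metadata when metadata_thp is enabled.
	 */
	void *stacks = b0_alloc_tcache_stack(tsdn, total_stack_bytes);
	if (stacks == NULL) {
		return;
	}
	/* ... carve per-bin cache_bin stacks out of the region ... */

	/* On thread termination: return the region to base0 for reuse. */
	b0_dalloc_tcache_stack(tsdn, stacks);
}
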
@@ -704,5 +704,6 @@ void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
  * not cache_bin_init was called on it.
  */
 bool cache_bin_still_zero_initialized(cache_bin_t *bin);
+bool cache_bin_stack_use_thp(void);
 
 #endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */

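Only the declaration of cache_bin_stack_use_thp() appears in this hunk. One plausible body, assuming it simply forwards to jemalloc's existing metadata_thp_enabled() helper (an assumption, since the definition is not shown here), would be:

bool
cache_bin_stack_use_thp(void) {
	/*
	 * Assumed body: the stacks now live in base0 metadata, so THP
	 * applies exactly when metadata_thp is enabled.
	 */
	return metadata_thp_enabled();
}
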
@@ -621,7 +621,8 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
 }
 
 static inline void
-edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
+edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn,
+    bool reused) {
 	edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
 	edata_addr_set(edata, addr);
 	edata_bsize_set(edata, bsize);

@@ -629,7 +630,8 @@ edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
 	edata_szind_set(edata, SC_NSIZES);
 	edata_sn_set(edata, sn);
 	edata_state_set(edata, extent_state_active);
-	edata_guarded_set(edata, false);
+	/* See comments in base_edata_is_reused. */
+	edata_guarded_set(edata, reused);
 	edata_zeroed_set(edata, true);
 	edata_committed_set(edata, true);
 	/*

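The replaced edata_guarded_set(edata, false) is the subtle part: base extents never use guard pages, so the guarded bit is free to carry the "is reused" state that the stats-skipping logic needs. A hedged sketch of the predicate the new comment points at (the actual definition lives in base.c and is not part of this diff):

/* Assumed shape; see the comment referenced by edata_binit() above. */
static bool
base_edata_is_reused(edata_t *edata) {
	/*
	 * Base extents are never guarded, so the guarded bit doubles as
	 * the "is reused" flag set in edata_binit().
	 */
	return edata_guarded_get(edata);
}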