#ifndef JEMALLOC_INTERNAL_CACHE_BIN_H
#define JEMALLOC_INTERNAL_CACHE_BIN_H

#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sz.h"

/*
 * The cache_bins are the mechanism that the tcache and the arena use to
 * communicate.  The tcache fills from and flushes to the arena by passing a
 * cache_bin_t to fill/flush.  When the arena needs to pull stats from the
 * tcaches associated with it, it does so by iterating over its
 * cache_bin_array_descriptor_t objects and reading out per-bin stats it
 * contains.  This makes it so that the arena need not know about the existence
 * of the tcache at all.
 */

/*
 * The size in bytes of each cache bin stack.  We also use this to indicate
 * *counts* of individual objects.
 */
typedef uint16_t cache_bin_sz_t;

#define JUNK_ADDR ((uintptr_t)0x7a7a7a7a7a7a7a7aULL)
/*
 * Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
 * bug starts leaking those.  Make it look like the junk pattern but be distinct
 * from it.
 */
static const uintptr_t cache_bin_preceding_junk = JUNK_ADDR;
/* Note: JUNK_ADDR vs. JUNK_ADDR + 1 -- this tells you which pointer leaked. */
static const uintptr_t cache_bin_trailing_junk = JUNK_ADDR + 1;
/*
 * A pointer used to initialize a fake stack_head for disabled small bins
 * so that the enabled/disabled assessment does not rely on ncached_max.
 */
extern const uintptr_t disabled_bin;

/*
 * That implies the following value, for the maximum number of items in any
 * individual bin.  The cache bins track their bounds looking just at the low
 * bits of a pointer, compared against a cache_bin_sz_t.  So that's
 *   1 << (sizeof(cache_bin_sz_t) * 8)
 * bytes spread across pointer sized objects to get the maximum.
 */
#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
    / sizeof(void *) - 1)
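
/*
 * A worked instance of the formula above (illustrative only; the
 * CACHE_BIN_NCACHED_MAX definition is authoritative): with cache_bin_sz_t
 * being uint16_t and 8-byte pointers,
 *
 *   (1 << (2 * 8)) / 8 - 1 == 65536 / 8 - 1 == 8191
 *
 * so a single bin can never hold more than 8191 items, and the at-most-2^16
 * byte stack can always be addressed by the low 16 bits of a pointer.
 */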

/*
 * This lives inside the cache_bin (for locality reasons), and is initialized
 * alongside it, but is otherwise not modified by any cache bin operations.
 * It's logically public and maintained by its callers.
 */
typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
        /*
         * Number of allocation requests that corresponded to the size of this
         * bin.
         */
        uint64_t nrequests;
};

/*
 * Read-only information associated with each element of tcache_t's tbins array
 * is stored separately, mainly to reduce memory usage.
 */
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
        cache_bin_sz_t ncached_max;
};

/*
 * Responsible for caching allocations associated with a single size.
 *
 * Several pointers are used to track the stack.  To save on metadata bytes,
 * only the stack_head is a full sized pointer (which is dereferenced on the
 * fastpath), while the others store only the low 16 bits -- this is correct
 * because a single stack never takes more space than 2^16 bytes, and at the
 * same time only equality checks are performed on the low bits.
 *
 * (low addr)                                        (high addr)
 * |------stashed------|------available------|------cached-----|
 * ^                   ^                     ^                 ^
 * low_bound(derived)  low_bits_full         stack_head        low_bits_empty
 */

typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
        /*
         * The stack grows down.  Whenever the bin is nonempty, the head points
         * to an array entry containing a valid allocation.  When it is empty,
         * the head points to one element past the owned array.
         */
        void **stack_head;
        /*
         * stack_head and tstats are both modified frequently.  Let's keep them
         * close so that they have a higher chance of being on the same
         * cacheline, thus fewer write-backs.
         */
        cache_bin_stats_t tstats;

        /*
         * The low bits of the address of the first item in the stack that
         * hasn't been used since the last GC, to track the low water mark (min
         * # of cached items).
         *
         * Since the stack grows down, this is a higher address than
         * low_bits_full.
         */
        uint16_t low_bits_low_water;

        /*
         * The low bits of the value that stack_head will take on when the array
         * is full (of cached & stashed items).  But remember that stack_head
         * always points to a valid item when the array is nonempty -- this
         * position is itself within the array.
         *
         * Recall that since the stack grows down, this is the lowest available
         * address in the array for caching.  Only adjusted when stashing items.
         */
        uint16_t low_bits_full;

        /*
         * The low bits of the value that stack_head will take on when the array
         * is empty.
         *
         * The stack grows down -- this is one past the highest address in the
         * array.  Immutable after initialization.
         */
        uint16_t low_bits_empty;

        /* The maximum number of cached items in the bin. */
        cache_bin_info_t bin_info;
};
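
/*
 * A concrete (hypothetical) picture of the fields above, for a bin whose
 * backing array holds 4 pointers, starts at address 0x...52c0 on a 64-bit
 * machine, and has nothing stashed:
 *
 *   low_bits_full  == 0x52c0            (lowest slot; head when full)
 *   low_bits_empty == 0x52e0            (one past the highest slot)
 *   stack_head     == 0x...52d0  =>  ncached == (0x52e0 - 0x52d0) / 8 == 2
 *
 * Only stack_head is stored as a full pointer; the other two are 16-bit
 * truncations, which is enough because the whole array spans < 2^16 bytes.
 */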

/*
 * The cache_bins live inside the tcache, but the arena (by design) isn't
 * supposed to know much about tcache internals.  To let the arena iterate over
 * associated bins, we keep (with the tcache) a linked list of
 * cache_bin_array_descriptor_ts that tell the arena how to find the bins.
 */
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
        /*
         * The arena keeps a list of the cache bins associated with it, for
         * stats collection.
         */
        ql_elm(cache_bin_array_descriptor_t) link;
        /* Pointers to the tcache bins. */
        cache_bin_t *bins;
};

static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
    cache_bin_t *bins) {
        ql_elm_new(descriptor, link);
        descriptor->bins = bins;
}
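
/*
 * Usage sketch (illustrative only; the real wiring lives in the tcache and
 * arena code, and the descriptor/list field names below are hypothetical): a
 * tcache that owns an array of bins initializes a descriptor and hands it to
 * its arena, which links it into a ql list it later walks for stats.
 *
 *    cache_bin_array_descriptor_init(&tcache->cache_bin_array_descriptor,
 *        tcache->bins);
 *    ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
 *        &tcache->cache_bin_array_descriptor, link);
 */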

JEMALLOC_ALWAYS_INLINE bool
cache_bin_nonfast_aligned(const void *ptr) {
        if (!config_uaf_detection) {
                return false;
        }
        /*
         * Currently we use alignment to decide which pointer to junk & stash on
         * dealloc (for catching use-after-free).  In some common cases a
         * page-aligned check is needed already (sdalloc w/ config_prof), so we
         * are getting it more or less for free -- no added instructions on
         * free_fastpath.
         *
         * Another way of deciding which pointer to sample is to add another
         * thread_event to pick one every N bytes.  That also adds no cost on
         * the fastpath, however it will tend to pick large allocations, which
         * is not the desired behavior.
         */
        return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
}
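
/*
 * For example (the mask value here is illustrative, not the configured
 * default): if san_cache_bin_nonfast_mask were PAGE - 1 == 0xfff, then
 *
 *   cache_bin_nonfast_aligned((void *)0x7f0000042000) == true   (page-aligned)
 *   cache_bin_nonfast_aligned((void *)0x7f0000042010) == false
 *
 * i.e. only page-aligned pointers would take the non-fast (junk & stash) path.
 */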

static inline const void *
cache_bin_disabled_bin_stack(void) {
        return &disabled_bin;
}

/*
 * If a cache bin was zero initialized (either because it lives in static or
 * thread-local storage, or was memset to 0), this function indicates whether or
 * not cache_bin_init was called on it.
 */
static inline bool
cache_bin_still_zero_initialized(cache_bin_t *bin) {
        return bin->stack_head == NULL;
}

static inline bool
cache_bin_disabled(cache_bin_t *bin) {
        bool disabled = (bin->stack_head == cache_bin_disabled_bin_stack());
        if (disabled) {
                assert((uintptr_t)(*bin->stack_head) == JUNK_ADDR);
        }
        return disabled;
}

/* Returns ncached_max: Upper limit on ncached. */
static inline cache_bin_sz_t
cache_bin_info_ncached_max_get(cache_bin_t *bin, cache_bin_info_t *info) {
        assert(!cache_bin_disabled(bin));
        assert(info == &bin->bin_info);
        return info->ncached_max;
}

/*
 * Internal.
 *
 * Asserts that the pointer associated with earlier is <= the one associated
 * with later.
 */
static inline void
cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
        if (earlier > later) {
                assert(bin->low_bits_full > bin->low_bits_empty);
        }
}

/*
 * Internal.
 *
 * Does difference calculations that handle wraparound correctly.  Earlier must
 * be associated with the position earlier in memory.
 */
static inline uint16_t
cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
        cache_bin_assert_earlier(bin, earlier, later);
        return later - earlier;
}
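
/*
 * A worked example of the wraparound handling (addresses are hypothetical):
 * suppose the backing array crosses a 2^16 boundary, with
 *
 *   low_bits(stack_head) == 0xffc0 and low_bits_empty == 0x0040.
 *
 * The uint16_t subtraction 0x0040 - 0xffc0 wraps to 0x0080 == 128 bytes, i.e.
 * 16 cached pointers -- exactly the distance from stack_head up to the empty
 * position, even though the "later" low bits compare numerically smaller.
 */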

/*
 * Number of items currently cached in the bin, without checking ncached_max.
 */
static inline cache_bin_sz_t
cache_bin_ncached_get_internal(cache_bin_t *bin) {
        cache_bin_sz_t diff = cache_bin_diff(bin,
            (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty);
        cache_bin_sz_t n = diff / sizeof(void *);
        /*
         * We have undefined behavior here; if this function is called from the
         * arena stats updating code, then stack_head could change from the
         * first line to the next one.  Morally, these loads should be atomic,
         * but compilers won't currently generate comparisons with in-memory
         * operands against atomics, and these variables get accessed on the
         * fast paths.  This should still be "safe" in the sense of generating
         * the correct assembly for the foreseeable future, though.
         */
        assert(n == 0 || *(bin->stack_head) != NULL);
        return n;
}

/*
 * Number of items currently cached in the bin, with checking ncached_max.  The
 * caller must know that no concurrent modification of the cache_bin is
 * possible.
 */
static inline cache_bin_sz_t
cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
        cache_bin_sz_t n = cache_bin_ncached_get_internal(bin);
        assert(n <= cache_bin_info_ncached_max_get(bin, info));
        return n;
}

/*
 * Internal.
 *
 * A pointer to the position one past the end of the backing array.
 *
 * Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
 * are subject to concurrent modifications.
 */
static inline void **
cache_bin_empty_position_get(cache_bin_t *bin) {
        cache_bin_sz_t diff = cache_bin_diff(bin,
            (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty);
        byte_t *empty_bits = (byte_t *)bin->stack_head + diff;
        void **ret = (void **)empty_bits;

        assert(ret >= bin->stack_head);

        return ret;
}

/*
 * Internal.
 *
 * Calculates low bits of the lower bound of the usable cache bin's range (see
 * cache_bin_t visual representation above).
 *
 * No values are concurrently modified, so should be safe to read in a
 * multithreaded environment.  Currently concurrent access happens only during
 * arena statistics collection.
 */
static inline uint16_t
cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
        return (uint16_t)bin->low_bits_empty -
            cache_bin_info_ncached_max_get(bin, info) * sizeof(void *);
}

/*
 * Internal.
 *
 * A pointer to the position with the lowest address of the backing array.
 */
static inline void **
cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
        cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
        void **ret = cache_bin_empty_position_get(bin) - ncached_max;
        assert(ret <= bin->stack_head);

        return ret;
}

/*
 * As the name implies.  This is important since it's not correct to try to
 * batch fill a nonempty cache bin.
 */
static inline void
cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
        assert(cache_bin_ncached_get_local(bin, info) == 0);
        assert(cache_bin_empty_position_get(bin) == bin->stack_head);
}

/*
 * Get low water, but without any of the correctness checking we do for the
 * caller-usable version, if we are temporarily breaking invariants (like
 * ncached >= low_water during flush).
 */
static inline cache_bin_sz_t
cache_bin_low_water_get_internal(cache_bin_t *bin) {
        return cache_bin_diff(bin, bin->low_bits_low_water,
            bin->low_bits_empty) / sizeof(void *);
}

/* Returns the numeric value of low water in [0, ncached]. */
static inline cache_bin_sz_t
cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
        cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
        assert(low_water <= cache_bin_info_ncached_max_get(bin, info));
        assert(low_water <= cache_bin_ncached_get_local(bin, info));

        cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
            bin->low_bits_low_water);

        return low_water;
}

/*
 * Indicates that the current cache bin position should be the low water mark
 * going forward.
 */
static inline void
cache_bin_low_water_set(cache_bin_t *bin) {
        assert(!cache_bin_disabled(bin));
        bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
}

static inline void
cache_bin_low_water_adjust(cache_bin_t *bin) {
        assert(!cache_bin_disabled(bin));
        if (cache_bin_ncached_get_internal(bin)
            < cache_bin_low_water_get_internal(bin)) {
                cache_bin_low_water_set(bin);
        }
}

JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
        /*
         * success (instead of ret) should be checked upon the return of this
         * function.  We avoid checking (ret == NULL) because there is never a
         * null stored on the avail stack (which is unknown to the compiler),
         * and eagerly checking ret would cause a pipeline stall (waiting for
         * the cacheline).
         */

        /*
         * This may read from the empty position; however the loaded value
         * won't be used.  It's safe because the stack has one more slot
         * reserved.
         */
        void *ret = *bin->stack_head;
        uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
        void **new_head = bin->stack_head + 1;

        /*
         * Note that the low water mark is at most empty; if we pass this
         * check, we know we're non-empty.
         */
        if (likely(low_bits != bin->low_bits_low_water)) {
                bin->stack_head = new_head;
                *success = true;
                return ret;
        }
        if (!adjust_low_water) {
                *success = false;
                return NULL;
        }
        /*
         * In the fast-path case where we call alloc_easy and then alloc, the
         * previous checking and computation is optimized away -- we didn't
         * actually commit any of our operations.
         */
        if (likely(low_bits != bin->low_bits_empty)) {
                bin->stack_head = new_head;
                bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
                *success = true;
                return ret;
        }
        *success = false;
        return NULL;
}

/*
 * Allocate an item out of the bin, failing if we're at the low-water mark.
 */
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
        /* We don't look at info if we're not adjusting low-water. */
        return cache_bin_alloc_impl(bin, success, false);
}

/*
 * Allocate an item out of the bin, even if we're currently at the low-water
 * mark (and failing only if the bin is empty).
 */
JEMALLOC_ALWAYS_INLINE void *
cache_bin_alloc(cache_bin_t *bin, bool *success) {
        return cache_bin_alloc_impl(bin, success, true);
}
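
/*
 * Caller-side sketch (hypothetical; the real fast path lives in the tcache
 * code): first try the cheap variant, and only fall back to the low-water
 * adjusting variant -- and then to a fill from the arena -- when it fails.
 *
 *    bool success;
 *    void *ret = cache_bin_alloc_easy(bin, &success);
 *    if (unlikely(!success)) {
 *            ret = cache_bin_alloc(bin, &success);
 *    }
 *    if (unlikely(!success)) {
 *            // Bin is empty: refill it from the arena, then retry.
 *    }
 */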

JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
        cache_bin_sz_t n = cache_bin_ncached_get_internal(bin);
        if (n > num) {
                n = (cache_bin_sz_t)num;
        }
        memcpy(out, bin->stack_head, n * sizeof(void *));
        bin->stack_head += n;
        cache_bin_low_water_adjust(bin);

        return n;
}
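
/*
 * Batch allocation sketch (hypothetical caller): ask for up to 8 cached
 * pointers; fewer may be returned if the bin currently holds fewer.
 *
 *    void *ptrs[8];
 *    cache_bin_sz_t got = cache_bin_alloc_batch(bin, 8, ptrs);
 *    // ptrs[0..got) are valid allocations popped off the cache bin stack.
 */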

JEMALLOC_ALWAYS_INLINE bool
cache_bin_full(cache_bin_t *bin) {
        return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
}

/*
 * Scans the allocated area of the cache_bin for the given pointer, up to the
 * configured scan limit.  If ptr is found, fires safety_check_fail and returns
 * true.
 */
JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_safety_checks(cache_bin_t *bin, void *ptr) {
        if (!config_debug || opt_debug_double_free_max_scan == 0) {
                return false;
        }

        cache_bin_sz_t ncached = cache_bin_ncached_get_internal(bin);
        unsigned max_scan = opt_debug_double_free_max_scan < ncached
            ? opt_debug_double_free_max_scan
            : ncached;

        void **cur = bin->stack_head;
        void **limit = cur + max_scan;
        for (; cur < limit; cur++) {
                if (*cur == ptr) {
                        safety_check_fail(
                            "Invalid deallocation detected: double free of "
                            "pointer %p\n",
                            ptr);
                        return true;
                }
        }
        return false;
}

/*
 * Free an object into the given bin.  Fails only if the bin is full.
 */
JEMALLOC_ALWAYS_INLINE bool
cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
        if (unlikely(cache_bin_full(bin))) {
                return false;
        }

        if (unlikely(cache_bin_dalloc_safety_checks(bin, ptr))) {
                return true;
        }

        bin->stack_head--;
        *bin->stack_head = ptr;
        cache_bin_assert_earlier(bin, bin->low_bits_full,
            (uint16_t)(uintptr_t)bin->stack_head);

        return true;
}
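
/*
 * Free fast-path sketch (hypothetical caller; the real logic is in the tcache
 * deallocation path): a false return means the bin is full, so the caller must
 * flush some items back to the arena before retrying.
 *
 *    if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
 *            // Flush a portion of the bin back to the arena, then:
 *            bool ok = cache_bin_dalloc_easy(bin, ptr);
 *            assert(ok);
 *    }
 */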

/* Returns false if failed to stash (i.e. bin is full). */
JEMALLOC_ALWAYS_INLINE bool
cache_bin_stash(cache_bin_t *bin, void *ptr) {
        if (cache_bin_full(bin)) {
                return false;
        }

        /* Stash at the full position, in the [full, head) range. */
        uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
        /* Wraparound handled as well. */
        uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head);
        *(void **)((byte_t *)bin->stack_head - diff) = ptr;

        assert(!cache_bin_full(bin));
        bin->low_bits_full += sizeof(void *);
        cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);

        return true;
}

/* Get the number of stashed pointers. */
JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info) {
        cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
        uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
            info);

        cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
            bin->low_bits_full) / sizeof(void *);
        assert(n <= ncached_max);
        if (config_debug && n != 0) {
                /* Below are for assertions only. */
                void **low_bound = cache_bin_low_bound_get(bin, info);

                assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
                void *stashed = *(low_bound + n - 1);
                bool aligned = cache_bin_nonfast_aligned(stashed);
#ifdef JEMALLOC_JET
                /* Allow arbitrary pointers to be stashed in tests. */
                aligned = true;
#endif
                assert(stashed != NULL && aligned);
        }

        return n;
}

JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
        cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info);
        assert(n <= cache_bin_info_ncached_max_get(bin, info));
        return n;
}

/*
 * Obtain a racy view of the number of items currently in the cache bin, in the
 * presence of possible concurrent modifications.
 *
 * Note that this is the only racy function in this header.  All other
 * functions are assumed to be non-racy.  The "racy" term here means accessed
 * from another thread (that is not the owner of the specific cache bin).  This
 * only happens when gathering stats (read-only).  The only change because of
 * the racy condition is that assertions based on mutable fields are omitted.
 *
 * It's important to keep in mind that 'bin->stack_head' and
 * 'bin->low_bits_full' can be modified concurrently and almost no assertions
 * about their values can be made.
 *
 * This function should not call other utility functions because the racy
 * condition may cause unexpected / undefined behaviors in unverified utility
 * functions.  Currently, this function calls two utility functions,
 * cache_bin_info_ncached_max_get and cache_bin_low_bits_low_bound_get, because
 * they only access values that will not be concurrently modified.
 */
static inline void
cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
    cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
        /* Racy version of cache_bin_ncached_get_internal. */
        cache_bin_sz_t diff = bin->low_bits_empty -
            (uint16_t)(uintptr_t)bin->stack_head;
        cache_bin_sz_t n = diff / sizeof(void *);

        cache_bin_sz_t ncached_max = cache_bin_info_ncached_max_get(bin, info);
        assert(n <= ncached_max);
        *ncached = n;

        /* Racy version of cache_bin_nstashed_get_internal. */
        uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
            info);
        n = (bin->low_bits_full - low_bits_low_bound) / sizeof(void *);

        assert(n <= ncached_max);
        *nstashed = n;
        /* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
}

/*
 * Filling and flushing are done in batch, on arrays of void *s.  For filling,
 * the arrays go forward, and can be accessed with ordinary array arithmetic.
 * For flushing, we work from the end backwards, and so need to use special
 * accessors that invert the usual ordering.
 *
 * This is important for maintaining first-fit; the arena code fills with
 * earliest objects first, and so those are the ones we should return first for
 * cache_bin_alloc calls.  When flushing, we should flush the objects that we
 * wish to return later; those at the end of the array.  This is better for the
 * first-fit heuristic as well as for cache locality; the most recently freed
 * objects are the ones most likely to still be in cache.
 *
 * This all sounds very hand-wavey and theoretical, but reversing the ordering
 * on one or the other pathway leads to measurable slowdowns.
 */

typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
struct cache_bin_ptr_array_s {
        cache_bin_sz_t n;
        void **ptr;
};

/*
 * Declare a cache_bin_ptr_array_t sufficient for nval items.
 *
 * In the current implementation, this could be just part of a
 * cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
 * Indirecting behind a macro, though, means experimenting with linked-list
 * representations is easy (since they'll require an alloca in the calling
 * frame).
 */
#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
    cache_bin_ptr_array_t name; \
    name.n = (nval)

/*
 * Start a fill.  The bin must be empty, and this must be followed by a
 * finish_fill call before doing any alloc/dalloc operations on the bin.
 */
static inline void
cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
    cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
        cache_bin_assert_empty(bin, info);
        arr->ptr = cache_bin_empty_position_get(bin) - nfill;
}

/*
 * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
 * fill, nfilled here is the number we actually filled (which may be less, in
 * case of OOM).
 */
static inline void
cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
    cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
        cache_bin_assert_empty(bin, info);
        void **empty_position = cache_bin_empty_position_get(bin);
        if (nfilled < arr->n) {
                memmove(empty_position - nfilled, empty_position - arr->n,
                    nfilled * sizeof(void *));
        }
        bin->stack_head = empty_position - nfilled;
}
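
/*
 * Fill-protocol sketch (hypothetical arena-side caller): declare a ptr array,
 * point it at the empty bin's stack, have the arena write up to arr.n objects
 * into arr.ptr[0..nfilled), and then commit however many were obtained.
 *
 *    CACHE_BIN_PTR_ARRAY_DECLARE(arr, nfill);
 *    cache_bin_init_ptr_array_for_fill(bin, info, &arr, nfill);
 *    // ... arena writes nfilled (<= nfill) pointers into arr.ptr ...
 *    cache_bin_finish_fill(bin, info, &arr, nfilled);
 */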

/*
 * Same deal, but with flush.  Unlike fill (which can fail), the user must flush
 * everything we give them.
 */
static inline void
cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
    cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
        arr->ptr = cache_bin_empty_position_get(bin) - nflush;
        assert(cache_bin_ncached_get_local(bin, info) == 0
            || *arr->ptr != NULL);
}

static inline void
cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
    cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
        unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
        memmove(bin->stack_head + nflushed, bin->stack_head,
            rem * sizeof(void *));
        bin->stack_head += nflushed;
        cache_bin_low_water_adjust(bin);
}
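
/*
 * Flush-protocol sketch (hypothetical tcache-side caller): the nflush items at
 * the bottom of the stack (closest to the empty position, i.e. those that
 * alloc would return last) are exposed through the ptr array, handed back to
 * the arena, and then dropped from the bin.
 *
 *    CACHE_BIN_PTR_ARRAY_DECLARE(arr, nflush);
 *    cache_bin_init_ptr_array_for_flush(bin, info, &arr, nflush);
 *    // ... return arr.ptr[0..nflush) to the arena; all must be flushed ...
 *    cache_bin_finish_flush(bin, info, &arr, nflush);
 */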

static inline void
cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
    cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
    cache_bin_sz_t nstashed) {
        assert(nstashed > 0);
        assert(cache_bin_nstashed_get_local(bin, info) == nstashed);

        void **low_bound = cache_bin_low_bound_get(bin, info);
        arr->ptr = low_bound;
        assert(*arr->ptr != NULL);
}

static inline void
cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
        void **low_bound = cache_bin_low_bound_get(bin, info);

        /* Reset the bin local full position. */
        bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
        assert(cache_bin_nstashed_get_local(bin, info) == 0);
}

/*
 * Initialize a cache_bin_info to represent up to the given number of items in
 * the cache_bins it is associated with.
 */
void cache_bin_info_init(cache_bin_info_t *bin_info,
    cache_bin_sz_t ncached_max);
/*
 * Given an array of initialized cache_bin_info_ts, determine how big an
 * allocation is required to initialize a full set of cache_bin_ts.
 */
void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
    size_t *size, size_t *alignment);

/*
 * Actually initialize some cache bins.  Callers should allocate the backing
 * memory indicated by a call to cache_bin_info_compute_alloc.  They should
 * then preincrement, call init once for each bin and info, and then call
 * cache_bin_postincrement.  *cur_offset will then be the offset immediately
 * past the end of the allocation.
 */
void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
    void *alloc, size_t *cur_offset);
void cache_bin_postincrement(void *alloc, size_t *cur_offset);
void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
    size_t *cur_offset);
void cache_bin_init_disabled(cache_bin_t *bin, cache_bin_sz_t ncached_max);
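
/*
 * Initialization sketch (hypothetical caller, mirroring the protocol described
 * above; "base_alloc_fn" stands in for whatever provides the backing memory):
 *
 *    size_t size, alignment, cur_offset = 0;
 *    cache_bin_info_compute_alloc(infos, ninfos, &size, &alignment);
 *    void *alloc = base_alloc_fn(size, alignment);
 *    cache_bin_preincrement(infos, ninfos, alloc, &cur_offset);
 *    for (szind_t i = 0; i < ninfos; i++) {
 *            cache_bin_init(&bins[i], &infos[i], alloc, &cur_offset);
 *    }
 *    cache_bin_postincrement(alloc, &cur_offset);
 *    // cur_offset should now equal the computed size.
 */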

bool cache_bin_stack_use_thp(void);

#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */