Cache bin: rewrite to track more state.
With this, we track all of the empty, full, and low water states together. This simplifies a lot of the tracking logic, since we no longer need the cache_bin_info_t for state queries (except for some debugging).
committed by David Goldblatt
parent fef0b1ffe4
commit 397da03865
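To make the commit message concrete: after this change, a bin's empty boundary, full boundary, and low-water mark all live in the cache_bin_t itself, as 16-bit low bits of addresses inside the bin's pointer stack, so a state query such as the number of cached items no longer needs the cache_bin_info_t. The following is a minimal standalone sketch of that idea, not jemalloc's actual implementation; the toy_* names are invented for illustration.

/*
 * Sketch only: track a pointer stack's state via 16-bit low bits, in the
 * spirit of this commit.  Not jemalloc code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_cache_bin_s {
        void **stack_head;            /* next slot to pop on allocation */
        uint16_t low_bits_low_water;  /* tracked here but not exercised below */
        uint16_t low_bits_full;       /* low bits of the lowest slot address */
        uint16_t low_bits_empty;      /* low bits one past the highest slot */
} toy_cache_bin_t;

/*
 * Byte distance between two positions in one bin's stack.  The mod-2^16
 * subtraction is correct as long as the stack itself is (much) smaller than
 * 64 KiB, which is why the real code keeps each bin's stack small.
 */
static uint16_t
toy_diff(uint16_t hi_bits, uint16_t lo_bits) {
        return (uint16_t)(hi_bits - lo_bits);
}

/* Number of cached pointers -- note: no "info" argument needed. */
static size_t
toy_ncached(const toy_cache_bin_t *bin) {
        return toy_diff(bin->low_bits_empty,
            (uint16_t)(uintptr_t)bin->stack_head) / sizeof(void *);
}

int
main(void) {
        enum { NCACHED_MAX = 8 };
        void **stack = malloc(NCACHED_MAX * sizeof(void *));
        if (stack == NULL) {
                return 1;
        }

        /* Init to the empty position, as cache_bin_init does. */
        toy_cache_bin_t bin;
        void **empty_position = stack + NCACHED_MAX;
        bin.stack_head = empty_position;
        bin.low_bits_full = (uint16_t)(uintptr_t)stack;
        bin.low_bits_empty = (uint16_t)(uintptr_t)empty_position;
        bin.low_bits_low_water = (uint16_t)(uintptr_t)bin.stack_head;
        assert(toy_ncached(&bin) == 0);

        /* Cache one object: push toward lower addresses (toward "full"). */
        int object;
        *--bin.stack_head = &object;
        assert(toy_ncached(&bin) == 1);

        printf("ncached = %zu\n", toy_ncached(&bin));
        free(stack);
        return 0;
}

Allocation in this scheme pops from stack_head toward higher addresses (toward low_bits_empty), which matches the comment in cache_bin_init in the diff below.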
@@ -8,7 +8,7 @@ cache_bin_info_init(cache_bin_info_t *info,
     cache_bin_sz_t ncached_max) {
         size_t stack_size = (size_t)ncached_max * sizeof(void *);
         assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
-        info->stack_size = (cache_bin_sz_t)stack_size;
+        info->ncached_max = (cache_bin_sz_t)ncached_max;
 }

 void
@@ -23,23 +23,14 @@ cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
          */
         *size = sizeof(void *) * 2;
         for (szind_t i = 0; i < ninfos; i++) {
-                *size += infos[i].stack_size;
+                *size += infos[i].ncached_max * sizeof(void *);
         }

         /*
-         * 1) Align to at least PAGE, to minimize the # of TLBs needed by the
+         * Align to at least PAGE, to minimize the # of TLBs needed by the
          * smaller sizes; also helps if the larger sizes don't get used at all.
-         * 2) On 32-bit the pointers won't be compressed; use minimal alignment.
          */
-        if (LG_SIZEOF_PTR < 3 || *size < PAGE) {
-                *alignment = PAGE;
-        } else {
-                /*
-                 * Align pow2 to avoid overflow the cache bin compressed
-                 * pointers.
-                 */
-                *alignment = pow2_ceil_zu(*size);
-        }
+        *alignment = PAGE;
 }

 void
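For a concrete sense of the size computation in the hunk above: the total is two pointer-sized slots (seemingly reserved for the boundary mark pattern mentioned in the preincrement hunk below) plus ncached_max * sizeof(void *) per bin, and the alignment is now simply PAGE instead of pow2_ceil_zu(*size). A tiny illustration with made-up ncached_max values of 20 and 40 (not jemalloc's defaults), on a 64-bit build:

/* Illustration only: hypothetical ncached_max values, not jemalloc's. */
#include <stddef.h>
#include <stdio.h>

int
main(void) {
        size_t ncached_max[] = {20, 40};
        size_t nbins = sizeof(ncached_max) / sizeof(ncached_max[0]);

        size_t size = sizeof(void *) * 2;     /* the two boundary slots */
        for (size_t i = 0; i < nbins; i++) {
                size += ncached_max[i] * sizeof(void *);
        }
        /* With 8-byte pointers: 2*8 + 20*8 + 40*8 = 496 bytes, PAGE-aligned. */
        printf("raw size = %zu bytes\n", size);
        return 0;
}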
@@ -53,10 +44,6 @@ cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
                 cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
                     &computed_alignment);
                 assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
-
-                /* And that alignment should disallow overflow. */
-                uint32_t lowbits = (uint32_t)((uintptr_t)alloc + computed_size);
-                assert((uint32_t)(uintptr_t)alloc < lowbits);
         }
         /*
          * Leave a noticeable mark pattern on the boundaries, in case a bug
@@ -81,7 +68,6 @@ cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
 void
 cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
     size_t *cur_offset) {
-        assert(sizeof(bin->cur_ptr) == sizeof(void *));
         /*
          * The full_position points to the lowest available space. Allocations
          * will access the slots toward higher addresses (for the benefit of
@@ -89,21 +75,23 @@ cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
          */
         void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
         void *full_position = stack_cur;
-        uint32_t bin_stack_size = info->stack_size;
+        uint16_t bin_stack_size = info->ncached_max * sizeof(void *);

         *cur_offset += bin_stack_size;
         void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);

         /* Init to the empty position. */
-        bin->cur_ptr.ptr = empty_position;
-        bin->low_water_position = bin->cur_ptr.lowbits;
-        bin->full_position = (uint32_t)(uintptr_t)full_position;
-        assert(bin->cur_ptr.lowbits - bin->full_position == bin_stack_size);
+        bin->stack_head = (void **)empty_position;
+        bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
+        bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
+        bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
+        assert(cache_bin_diff(bin, bin->low_bits_full,
+            (uint16_t)(uintptr_t)bin->stack_head) == bin_stack_size);
         assert(cache_bin_ncached_get(bin, info) == 0);
         assert(cache_bin_empty_position_get(bin, info) == empty_position);
 }

 bool
 cache_bin_still_zero_initialized(cache_bin_t *bin) {
-        return bin->cur_ptr.ptr == NULL;
+        return bin->stack_head == NULL;
 }
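The cache_bin_init hunk above carves each bin's stack out of one shared allocation using a running offset: full_position is the current offset, the stack occupies ncached_max * sizeof(void *) bytes, and empty_position is the new offset. Below is a rough sketch of that bookkeeping, with invented toy_* names and a plain malloc standing in for the real PAGE-aligned mapping.

/* Sketch of the running-offset bookkeeping; not jemalloc code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_bin_bounds_s {
        uintptr_t full_position;   /* low end of this bin's stack */
        uintptr_t empty_position;  /* one past the high end */
} toy_bin_bounds_t;

static void
toy_bin_carve(toy_bin_bounds_t *bounds, void *alloc, size_t *cur_offset,
    size_t ncached_max) {
        bounds->full_position = (uintptr_t)alloc + *cur_offset;
        *cur_offset += ncached_max * sizeof(void *);
        bounds->empty_position = (uintptr_t)alloc + *cur_offset;
}

int
main(void) {
        size_t ncached_max[] = {20, 40};
        size_t total = (ncached_max[0] + ncached_max[1]) * sizeof(void *);
        void *alloc = malloc(total);
        if (alloc == NULL) {
                return 1;
        }

        size_t cur_offset = 0;
        toy_bin_bounds_t bins[2];
        for (size_t i = 0; i < 2; i++) {
                toy_bin_carve(&bins[i], alloc, &cur_offset, ncached_max[i]);
                printf("bin %zu: stack of %zu bytes at offset %zu\n", i,
                    (size_t)(bins[i].empty_position - bins[i].full_position),
                    (size_t)(bins[i].full_position - (uintptr_t)alloc));
        }

        free(alloc);
        return 0;
}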