Pass 'frequent_reuse' hint to PAI
The hint tells the page allocator interface (PAI) whether an allocation is expected to be reused frequently. It is currently consumed only by the guarding logic, but it could, for example, urge the allocator to keep the allocation cached.
committed by Alexander Lapenkov
parent 2c70e8d351
commit f56f5b9930
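
Before the per-file hunks, a minimal sketch of the extended allocation signature and one way a backend might act on the hint. This is illustrative only: example_alloc is hypothetical and not part of this commit; only the parameter list matches the diffs below.

/*
 * Hypothetical PAI backend showing the signature after this commit.
 * The backends actually touched below (hpa_alloc, pac_alloc_impl,
 * sec_alloc) accept the flag but do not branch on it yet.
 */
static edata_t *
example_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
    bool zero, bool guarded, bool frequent_reuse,
    bool *deferred_work_generated) {
	if (frequent_reuse) {
		/* E.g. prefer serving from, and refilling, a cache. */
	}
	/* ...normal allocation path... */
	return NULL;
}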
--- a/src/hpa.c
+++ b/src/hpa.c
@@ -9,7 +9,8 @@
 #define HPA_EDEN_SIZE (128 * HUGEPAGE)
 
 static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
     size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
 static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -760,7 +761,7 @@ hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 
 static edata_t *
 hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
 	assert(!guarded);
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
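Note that hpa_alloc gains the frequent_reuse parameter but, in the hunks shown, never reads it; per the commit message the hint is currently used only for guarding purposes.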
--- a/src/pa.c
+++ b/src/pa.c
@@ -128,7 +128,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	edata_t *edata = NULL;
 	if (!guarded && pa_shard_uses_hpa(shard)) {
 		edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
-		    zero, /* guarded */ false, deferred_work_generated);
+		    zero, /* guarded */ false, slab, deferred_work_generated);
 	}
 	/*
 	 * Fall back to the PAC if the HPA is off or couldn't serve the given
@@ -136,7 +136,7 @@ pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
 	 */
 	if (edata == NULL) {
 		edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
-		    guarded, deferred_work_generated);
+		    guarded, slab, deferred_work_generated);
 	}
 	if (edata != NULL) {
 		assert(edata_size_get(edata) == size);
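Design note: at the pa_alloc level the existing slab flag is passed as the frequent_reuse hint on both the HPA and PAC paths. Slab extents back the small size classes and are carved up and recycled continually, so treating slab allocations as frequently reused is a natural first mapping for the hint.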
--- a/src/pac.c
+++ b/src/pac.c
@@ -5,7 +5,8 @@
 #include "jemalloc/internal/san.h"
 
 static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -152,7 +153,8 @@ pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
 
 static edata_t *
 pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
-    bool zero, bool guarded, bool *deferred_work_generated) {
+    bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated) {
 	pac_t *pac = (pac_t *)self;
 	ehooks_t *ehooks = pac_ehooks_get(pac);
 
--- a/src/pai.c
+++ b/src/pai.c
@@ -7,7 +7,8 @@ pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
 	for (size_t i = 0; i < nallocs; i++) {
 		bool deferred_by_alloc = false;
 		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
-		    /* zero */ false, /* guarded */ false, &deferred_by_alloc);
+		    /* zero */ false, /* guarded */ false,
+		    /* frequent_reuse */ false, &deferred_by_alloc);
 		*deferred_work_generated |= deferred_by_alloc;
 		if (edata == NULL) {
 			return i;
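The default batch helper passes /* frequent_reuse */ false explicitly, keeping its behavior unchanged by this commit.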
--- a/src/sec.c
+++ b/src/sec.c
@@ -4,7 +4,8 @@
 #include "jemalloc/internal/sec.h"
 
 static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
-    size_t alignment, bool zero, bool guarded, bool *deferred_work_generated);
+    size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+    bool *deferred_work_generated);
 static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
 static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -218,7 +219,7 @@ sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
 
 static edata_t *
 sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
-    bool guarded, bool *deferred_work_generated) {
+    bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
 	assert((size & PAGE_MASK) == 0);
 	assert(!guarded);
 
@@ -227,7 +228,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 	if (zero || alignment > PAGE || sec->opts.nshards == 0
 	    || size > sec->opts.max_alloc) {
 		return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
-		    /* guarded */ false, deferred_work_generated);
+		    /* guarded */ false, frequent_reuse,
+		    deferred_work_generated);
 	}
 	pszind_t pszind = sz_psz2ind(size);
 	sec_shard_t *shard = sec_shard_pick(tsdn, sec);
@@ -250,7 +252,8 @@ sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
 			    size);
 		} else {
 			edata = pai_alloc(tsdn, sec->fallback, size, alignment,
-			    zero, /* guarded */ false, deferred_work_generated);
+			    zero, /* guarded */ false, frequent_reuse,
+			    deferred_work_generated);
 		}
 	}
 	return edata;
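sec_alloc itself does not inspect the hint; it forwards the caller's value to the fallback allocator both on the bypass path (zeroed, over-aligned, or too-large requests) and on a cache miss. A call through the extended interface now looks like this, mirroring the call sites above (frequent_reuse is set true here purely for illustration):

edata_t *edata = pai_alloc(tsdn, sec->fallback, size, alignment,
    /* zero */ false, /* guarded */ false, /* frequent_reuse */ true,
    &deferred_work_generated);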