Add a batch allocation interface to the PAI.

For now, no real allocator actually implements this interface; this will change
in subsequent diffs.
David Goldblatt, 2021-01-07 12:27:43 -08:00 (committed by David Goldblatt)
parent bf448d7a5a
commit 480f3b11cd
6 changed files with 59 additions and 1 deletion

View File

@@ -8,6 +8,14 @@ struct pai_s {
/* Returns NULL on failure. */
edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
size_t alignment, bool zero);
/*
* Returns the number of extents added to the list (which may be fewer
* than requested, in case of OOM). The list should already be
* initialized. The only alignment guarantee is page-alignment, and
* the results are not necessarily zeroed.
*/
size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results);
bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero);
bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -28,6 +36,12 @@ pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero) {
return self->alloc(tsdn, self, size, alignment, zero);
}
static inline size_t
pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
edata_list_active_t *results) {
return self->alloc_batch(tsdn, self, size, nallocs, results);
}
static inline bool
pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
size_t new_size, bool zero) {
@@ -51,9 +65,12 @@ pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list) {
}
/*
* An implementation of batch allocation that simply calls alloc once for
* each item in the list.
*/
size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results);
/* Ditto, for dalloc. */
void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
edata_list_active_t *list);
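
For orientation, here is a hypothetical caller-side sketch (not part of this commit) of how the new hook is meant to be used; it assumes the edata_list_active_init helper that accompanies edata_list_active_append in edata.h:

/*
 * Hypothetical usage sketch, not from this commit: request up to nallocs
 * page-aligned, not-necessarily-zeroed extents of `size` bytes.
 */
static size_t
pai_alloc_batch_usage_sketch(tsdn_t *tsdn, pai_t *pai, size_t size,
    size_t nallocs, edata_list_active_t *results) {
	/* The interface requires the list to be initialized already. */
	edata_list_active_init(results);
	size_t nsuccess = pai_alloc_batch(tsdn, pai, size, nallocs, results);
	/*
	 * On OOM, nsuccess may be less than nallocs; only the first nsuccess
	 * extents were appended to results.
	 */
	return nsuccess;
}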

View File

@@ -90,6 +90,7 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
* operating on corrupted data.
*/
shard->pai.alloc = &hpa_alloc;
shard->pai.alloc_batch = &pai_alloc_batch_default;
shard->pai.expand = &hpa_expand;
shard->pai.shrink = &hpa_shrink;
shard->pai.dalloc = &hpa_dalloc;

View File

@@ -91,6 +91,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
pac->pai.alloc = &pac_alloc_impl;
pac->pai.alloc_batch = &pai_alloc_batch_default;
pac->pai.expand = &pac_expand_impl;
pac->pai.shrink = &pac_shrink_impl;
pac->pai.dalloc = &pac_dalloc_impl;

View File

@@ -1,6 +1,19 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
size_t
pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results) {
for (size_t i = 0; i < nallocs; i++) {
edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
/* zero */ false);
if (edata == NULL) {
return i;
}
edata_list_active_append(results, edata);
}
return nallocs;
}
void
pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
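
As a purely illustrative sketch (not part of this commit), a caller that wants all-or-nothing semantics can unwind a short batch through the existing pai_dalloc_batch; this assumes the results list was empty on entry:

static bool
pai_alloc_batch_all_or_nothing_sketch(tsdn_t *tsdn, pai_t *self, size_t size,
    size_t nallocs, edata_list_active_t *results) {
	size_t nsuccess = pai_alloc_batch(tsdn, self, size, nallocs, results);
	if (nsuccess < nallocs) {
		/*
		 * Return the partial batch; assumes *results held nothing
		 * before the call, so everything on it came from this batch.
		 */
		pai_dalloc_batch(tsdn, self, results);
		return true;	/* Failure, per jemalloc's bool convention. */
	}
	return false;
}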

View File

@@ -52,6 +52,7 @@ sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t alloc_max,
* initialization failed will segfault in an easy-to-spot way.
*/
sec->pai.alloc = &sec_alloc;
sec->pai.alloc_batch = &pai_alloc_batch_default;
sec->pai.expand = &sec_expand;
sec->pai.shrink = &sec_shrink;
sec->pai.dalloc = &sec_dalloc;

View File

@@ -7,6 +7,7 @@ struct pai_test_allocator_s {
pai_t pai;
bool alloc_fail;
size_t alloc_count;
size_t alloc_batch_count;
size_t dalloc_count;
size_t dalloc_batch_count;
/*
@@ -42,6 +43,28 @@ pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
return edata;
}
static inline size_t
pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
size_t nallocs, edata_list_active_t *results) {
pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
if (ta->alloc_fail) {
return 0;
}
for (size_t i = 0; i < nallocs; i++) {
edata_t *edata = malloc(sizeof(edata_t));
assert_ptr_not_null(edata, "");
edata_init(edata, /* arena_ind */ 0,
(void *)ta->next_ptr, size,
/* slab */ false, /* szind */ 0, /* sn */ 1,
extent_state_active, /* zero */ false, /* committed */ true,
/* ranged */ false, EXTENT_NOT_HEAD);
ta->next_ptr += size;
ta->alloc_batch_count++;
edata_list_active_append(results, edata);
}
return nallocs;
}
static bool
pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
size_t old_size, size_t new_size, bool zero) {
@@ -82,6 +105,7 @@ static inline void
pai_test_allocator_init(pai_test_allocator_t *ta) {
ta->alloc_fail = false;
ta->alloc_count = 0;
ta->alloc_batch_count = 0;
ta->dalloc_count = 0;
ta->dalloc_batch_count = 0;
/* Just don't start the edata at 0. */
@@ -91,6 +115,7 @@ pai_test_allocator_init(pai_test_allocator_t *ta) {
ta->shrink_count = 0;
ta->shrink_return_value = false;
ta->pai.alloc = &pai_test_allocator_alloc;
ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
ta->pai.expand = &pai_test_allocator_expand;
ta->pai.shrink = &pai_test_allocator_shrink;
ta->pai.dalloc = &pai_test_allocator_dalloc;
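
Finally, a hypothetical test fragment (not part of this commit) showing how the stub above might be exercised; it assumes jemalloc's assert_zu_eq test macro and the edata_list_active_init helper:

static void
exercise_alloc_batch_stub(tsdn_t *tsdn, pai_test_allocator_t *ta) {
	enum { NALLOCS = 8 };
	edata_list_active_t results;
	edata_list_active_init(&results);
	size_t n = pai_alloc_batch(tsdn, &ta->pai, PAGE, NALLOCS, &results);
	assert_zu_eq(NALLOCS, n, "Expected the full batch to succeed");
	assert_zu_eq(NALLOCS, ta->alloc_batch_count,
	    "Expected the batch hook to be used once per extent");
}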