From 480f3b11cd61c1cf37c90d61701829a0cebc98da Mon Sep 17 00:00:00 2001
From: David Goldblatt
Date: Thu, 7 Jan 2021 12:27:43 -0800
Subject: [PATCH] Add a batch allocation interface to the PAI.

For now, no real allocator actually implements this interface; this will
change in subsequent diffs.
---
 include/jemalloc/internal/pai.h | 19 ++++++++++++++++++-
 src/hpa.c                       |  1 +
 src/pac.c                       |  1 +
 src/pai.c                       | 13 +++++++++++++
 src/sec.c                       |  1 +
 test/unit/sec.c                 | 25 +++++++++++++++++++++++++
 6 files changed, 59 insertions(+), 1 deletion(-)

diff --git a/include/jemalloc/internal/pai.h b/include/jemalloc/internal/pai.h
index 73f5433c..16e022d5 100644
--- a/include/jemalloc/internal/pai.h
+++ b/include/jemalloc/internal/pai.h
@@ -8,6 +8,14 @@ struct pai_s {
 	/* Returns NULL on failure. */
 	edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
 	    size_t alignment, bool zero);
+	/*
+	 * Returns the number of extents added to the list (which may be fewer
+	 * than requested, in case of OOM). The list should already be
+	 * initialized. The only alignment guarantee is page-alignment, and
+	 * the results are not necessarily zeroed.
+	 */
+	size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
+	    size_t nallocs, edata_list_active_t *results);
 	bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
 	    size_t old_size, size_t new_size, bool zero);
 	bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
@@ -28,6 +36,12 @@ pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero) {
 	return self->alloc(tsdn, self, size, alignment, zero);
 }
 
+static inline size_t
+pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
+    edata_list_active_t *results) {
+	return self->alloc_batch(tsdn, self, size, nallocs, results);
+}
+
 static inline bool
 pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
     size_t new_size, bool zero) {
@@ -51,9 +65,12 @@ pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list) {
 }
 
 /*
- * An implementation of batch deallocation that simply calls dalloc once for
+ * An implementation of batch allocation that simply calls alloc once for
  * each item in the list.
  */
+size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
+    size_t nallocs, edata_list_active_t *results);
+/* Ditto, for dalloc. */
 void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
     edata_list_active_t *list);
 
diff --git a/src/hpa.c b/src/hpa.c
index fa58bb77..338d5759 100644
--- a/src/hpa.c
+++ b/src/hpa.c
@@ -90,6 +90,7 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
 	 * operating on corrupted data.
 	 */
 	shard->pai.alloc = &hpa_alloc;
+	shard->pai.alloc_batch = &pai_alloc_batch_default;
 	shard->pai.expand = &hpa_expand;
 	shard->pai.shrink = &hpa_shrink;
 	shard->pai.dalloc = &hpa_dalloc;
diff --git a/src/pac.c b/src/pac.c
index 0ba0f2f0..93427ca1 100644
--- a/src/pac.c
+++ b/src/pac.c
@@ -91,6 +91,7 @@ pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
 	atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
 
 	pac->pai.alloc = &pac_alloc_impl;
+	pac->pai.alloc_batch = &pai_alloc_batch_default;
 	pac->pai.expand = &pac_expand_impl;
 	pac->pai.shrink = &pac_shrink_impl;
 	pac->pai.dalloc = &pac_dalloc_impl;
diff --git a/src/pai.c b/src/pai.c
index 1035c850..bd6966c9 100644
--- a/src/pai.c
+++ b/src/pai.c
@@ -1,6 +1,19 @@
 #include "jemalloc/internal/jemalloc_preamble.h"
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
+size_t
+pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
+    size_t nallocs, edata_list_active_t *results) {
+	for (size_t i = 0; i < nallocs; i++) {
+		edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
+		    /* zero */ false);
+		if (edata == NULL) {
+			return i;
+		}
+		edata_list_active_append(results, edata);
+	}
+	return nallocs;
+}
 
 void
 pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
diff --git a/src/sec.c b/src/sec.c
index 49b41047..af7c2910 100644
--- a/src/sec.c
+++ b/src/sec.c
@@ -52,6 +52,7 @@ sec_init(sec_t *sec, pai_t *fallback, size_t nshards, size_t alloc_max,
 	 * initialization failed will segfault in an easy-to-spot way.
 	 */
 	sec->pai.alloc = &sec_alloc;
+	sec->pai.alloc_batch = &pai_alloc_batch_default;
 	sec->pai.expand = &sec_expand;
 	sec->pai.shrink = &sec_shrink;
 	sec->pai.dalloc = &sec_dalloc;
diff --git a/test/unit/sec.c b/test/unit/sec.c
index 5fe3550c..69132c1f 100644
--- a/test/unit/sec.c
+++ b/test/unit/sec.c
@@ -7,6 +7,7 @@ struct pai_test_allocator_s {
 	pai_t pai;
 	bool alloc_fail;
 	size_t alloc_count;
+	size_t alloc_batch_count;
 	size_t dalloc_count;
 	size_t dalloc_batch_count;
 	/*
@@ -42,6 +43,28 @@ pai_test_allocator_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
 	return edata;
 }
 
+static inline size_t
+pai_test_allocator_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
+    size_t nallocs, edata_list_active_t *results) {
+	pai_test_allocator_t *ta = (pai_test_allocator_t *)self;
+	if (ta->alloc_fail) {
+		return 0;
+	}
+	for (size_t i = 0; i < nallocs; i++) {
+		edata_t *edata = malloc(sizeof(edata_t));
+		assert_ptr_not_null(edata, "");
+		edata_init(edata, /* arena_ind */ 0,
+		    (void *)ta->next_ptr, size,
+		    /* slab */ false, /* szind */ 0, /* sn */ 1,
+		    extent_state_active, /* zero */ false, /* comitted */ true,
+		    /* ranged */ false, EXTENT_NOT_HEAD);
+		ta->next_ptr += size;
+		ta->alloc_batch_count++;
+		edata_list_active_append(results, edata);
+	}
+	return nallocs;
+}
+
 static bool
 pai_test_allocator_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
     size_t old_size, size_t new_size, bool zero) {
@@ -82,6 +105,7 @@ static inline void
 pai_test_allocator_init(pai_test_allocator_t *ta) {
 	ta->alloc_fail = false;
 	ta->alloc_count = 0;
+	ta->alloc_batch_count = 0;
 	ta->dalloc_count = 0;
 	ta->dalloc_batch_count = 0;
 	/* Just don't start the edata at 0. */
@@ -91,6 +115,7 @@ pai_test_allocator_init(pai_test_allocator_t *ta) {
 	ta->shrink_count = 0;
 	ta->shrink_return_value = false;
 	ta->pai.alloc = &pai_test_allocator_alloc;
+	ta->pai.alloc_batch = &pai_test_allocator_alloc_batch;
 	ta->pai.expand = &pai_test_allocator_expand;
 	ta->pai.shrink = &pai_test_allocator_shrink;
 	ta->pai.dalloc = &pai_test_allocator_dalloc;
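
Caller-side sketch (illustrative, not part of the patch): pai_alloc_batch() may
hand back fewer extents than requested under OOM, so an all-or-nothing caller
can pass the partially filled list straight to the existing pai_dalloc_batch()
hook. The helper name below is hypothetical, and the edata_list_active_init()
call is assumed from the usual edata list helpers; the rest uses only the
signatures declared above, under the jemalloc-internal headers.

static size_t
grab_pages_all_or_nothing(tsdn_t *tsdn, pai_t *pai, size_t nallocs,
    edata_list_active_t *results) {
	/* The interface requires an already-initialized results list. */
	edata_list_active_init(results);
	/*
	 * Request nallocs page-sized extents. Only page-alignment is
	 * guaranteed, and the memory is not necessarily zeroed.
	 */
	size_t nsuccess = pai_alloc_batch(tsdn, pai, PAGE, nallocs, results);
	if (nsuccess < nallocs) {
		/* Partial success (OOM): release whatever was obtained. */
		pai_dalloc_batch(tsdn, pai, results);
		return 0;
	}
	return nsuccess;
}

A caller that can make use of a partial batch would instead keep the nsuccess
extents and skip the dalloc step.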