Add psset: a set of pageslabs.

This introduces a new sort of edata_t, the pageslab, along with a set to
manage them. This is part of a series of commits to implement a hugepage
allocator; the psset will be per-arena, and will track small page allocation
requests within a larger extent allocated from a centralized hugepage
allocator.
David Goldblatt 2020-07-10 17:40:13 -07:00 committed by David Goldblatt
parent ed99d300b9
commit 018b162d67
9 changed files with 670 additions and 1 deletion

Makefile.in

@@ -136,6 +136,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/prof_log.c \
$(srcroot)src/prof_recent.c \
$(srcroot)src/prof_sys.c \
$(srcroot)src/psset.c \
$(srcroot)src/rtree.c \
$(srcroot)src/safety_check.c \
$(srcroot)src/sc.c \
@@ -239,6 +240,7 @@ TESTS_UNIT := \
$(srcroot)test/unit/prof_tctx.c \
$(srcroot)test/unit/prof_thread_name.c \
$(srcroot)test/unit/prof_sys_thread_name.c \
$(srcroot)test/unit/psset.c \
$(srcroot)test/unit/ql.c \
$(srcroot)test/unit/qr.c \
$(srcroot)test/unit/rb.c \

include/jemalloc/internal/edata.h

@@ -202,7 +202,31 @@ struct edata_s {
* This keeps the size of an edata_t at exactly 128 bytes on
* architectures with 8-byte pointers and 4k pages.
*/
void *reserved1, *reserved2;
void *reserved1;
union {
/*
* We could steal a low bit from these fields to indicate what
* sort of "thing" this is (a page slab, an object within a page
* slab, or a non-pageslab range). We don't do this yet, but it
* would enable some extra asserts.
*/
/*
* If this edata is from an HPA, it may be part of some larger
* pageslab. Track it if so. Otherwise (either because it's
* not part of a pageslab, or not from the HPA at all), NULL.
*/
edata_t *ps;
/*
* If this edata *is* a pageslab, then it has some longest free
* range in it. Track it.
*/
struct {
uint32_t longest_free_range;
/* Not yet tracked. */
/* uint32_t longest_free_range_pos; */
};
};
union {
/*
@@ -346,6 +370,18 @@ edata_bsize_get(const edata_t *edata) {
return edata->e_bsize;
}
static inline edata_t *
edata_ps_get(const edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
return edata->ps;
}
static inline uint32_t
edata_longest_free_range_get(const edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
return edata->longest_free_range;
}
static inline void *
edata_before_get(const edata_t *edata) {
return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
@@ -428,6 +464,19 @@ edata_bsize_set(edata_t *edata, size_t bsize) {
edata->e_bsize = bsize;
}
static inline void
edata_ps_set(edata_t *edata, edata_t *ps) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA || ps == NULL);
edata->ps = ps;
}
static inline void
edata_longest_free_range_set(edata_t *edata, uint32_t longest_free_range) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA
|| longest_free_range == 0);
edata->longest_free_range = longest_free_range;
}
static inline void
edata_szind_set(edata_t *edata, szind_t szind) {
assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
@@ -562,6 +611,8 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
if (config_prof) {
edata_prof_tctx_set(edata, NULL);
}
edata_ps_set(edata, NULL);
edata_longest_free_range_set(edata, 0);
}
static inline void
@@ -581,6 +632,8 @@ edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
* wasting a state bit to encode this fact.
*/
edata_pai_set(edata, EXTENT_PAI_PAC);
edata_ps_set(edata, NULL);
edata_longest_free_range_set(edata, 0);
}
static inline int
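To make the new union concrete, here is a hedged sketch of the intended invariants, written against the accessors added above; the helper function is hypothetical, for illustration only, and not part of this commit:

static void
edata_ps_invariants_sketch(edata_t *member, edata_t *ps) {
	/* Both halves of the union are meaningful only for HPA extents. */
	assert(edata_pai_get(member) == EXTENT_PAI_HPA);
	/* A member extent points back at its owning pageslab... */
	assert(edata_ps_get(member) == ps);
	/* ...and starts within the slab's address range. */
	assert((uintptr_t)edata_base_get(member)
	    >= (uintptr_t)edata_base_get(ps));
	/*
	 * The pageslab side of the union caches its longest free run, in
	 * pages; that can never exceed the slab's total page count.
	 */
	assert(edata_longest_free_range_get(ps)
	    <= (edata_size_get(ps) >> LG_PAGE));
}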

include/jemalloc/internal/psset.h (new file, 61 lines)

@@ -0,0 +1,61 @@
#ifndef JEMALLOC_INTERNAL_PSSET_H
#define JEMALLOC_INTERNAL_PSSET_H
/*
* A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains
* a collection of page-slabs (the intent being that they are backed by
* hugepages, or at least could be), and handles allocation and deallocation
* requests.
*
* It has the same synchronization guarantees as the eset: stats queries don't
* need any external synchronization; everything else does.
*/
/*
* One more than the maximum pszind_t we will serve out of the HPA.
* Practically, we expect only the first few to be actually used. This
* corresponds to a maximum size of 512MB on systems with 4k pages and
* SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you
* can think of this as being SC_NPSIZES, but there's no sense in wasting that
* much space in the arena, making bitmaps that much larger, etc.
*/
#define PSSET_NPSIZES 64
typedef struct psset_s psset_t;
struct psset_s {
/*
* The pageslabs, quantized by the size class of the largest contiguous
* free run of pages in a pageslab.
*/
edata_heap_t pageslabs[PSSET_NPSIZES];
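/*
 * Tracks which of the bins above hold at least one pageslab, so that
 * lookups can skip the empty ones (the same scheme the eset's bitmap uses).
 */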
bitmap_t bitmap[BITMAP_GROUPS(PSSET_NPSIZES)];
};
void psset_init(psset_t *psset);
/*
* Tries to obtain an extent from an existing pageslab already in the set.
* Returns true on failure.
*/
bool psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size);
/*
* Given a newly created pageslab ps (not currently in the set), pass ownership
* to the psset and allocate an extent from within it. The passed-in pageslab
* must be at least as big as size.
*/
void psset_alloc_new(psset_t *psset, edata_t *ps,
edata_t *r_edata, size_t size);
/*
* Given an extent that comes from a pageslab in this pageslab set, returns it
* to its slab. Does not take ownership of the underlying edata_t.
*
* If some slab becomes empty as a result of the dalloc, it is returned -- the
* result must be checked and deallocated to the central HPA. Otherwise returns
* NULL.
*/
edata_t *psset_dalloc(psset_t *psset, edata_t *edata);
#endif /* JEMALLOC_INTERNAL_PSSET_H */
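A hedged usage sketch of how a caller (eventually, the per-arena HPA) might drive this interface; hpa_grow_pageslab and hpa_return_pageslab are hypothetical stand-ins for the centralized hugepage allocator, not functions from this commit:

/* Hypothetical central-HPA hooks, assumed here for illustration only. */
edata_t *hpa_grow_pageslab(size_t size);
void hpa_return_pageslab(edata_t *ps);

static bool
hpa_alloc_sketch(psset_t *psset, edata_t *r_edata, size_t size) {
	/* First try to carve the range out of an existing pageslab. */
	if (!psset_alloc_reuse(psset, r_edata, size)) {
		return false;
	}
	/* No slab could fit it; grow a fresh one and hand it to the set. */
	edata_t *ps = hpa_grow_pageslab(size);
	if (ps == NULL) {
		return true;
	}
	psset_alloc_new(psset, ps, r_edata, size);
	return false;
}

static void
hpa_dalloc_sketch(psset_t *psset, edata_t *edata) {
	edata_t *evicted = psset_dalloc(psset, edata);
	if (evicted != NULL) {
		/* The slab went empty; give it back to the central HPA. */
		hpa_return_pageslab(evicted);
	}
}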

msvc/projects/vc2015/jemalloc/jemalloc.vcxproj

@@ -76,6 +76,7 @@
<ClCompile Include="..\..\..\..\src\prof_log.c" />
<ClCompile Include="..\..\..\..\src\prof_recent.c" />
<ClCompile Include="..\..\..\..\src\prof_sys.c" />
<ClCompile Include="..\..\..\..\src\psset.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
<ClCompile Include="..\..\..\..\src\sc.c" />

msvc/projects/vc2015/jemalloc/jemalloc.vcxproj.filters

@@ -112,6 +112,9 @@
<ClCompile Include="..\..\..\..\src\prof_sys.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\psset.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>

msvc/projects/vc2017/jemalloc/jemalloc.vcxproj

@@ -76,6 +76,7 @@
<ClCompile Include="..\..\..\..\src\prof_log.c" />
<ClCompile Include="..\..\..\..\src\prof_recent.c" />
<ClCompile Include="..\..\..\..\src\prof_sys.c" />
<ClCompile Include="..\..\..\..\src\psset.c" />
<ClCompile Include="..\..\..\..\src\rtree.c" />
<ClCompile Include="..\..\..\..\src\safety_check.c" />
<ClCompile Include="..\..\..\..\src\sc.c" />

msvc/projects/vc2017/jemalloc/jemalloc.vcxproj.filters

@@ -112,6 +112,9 @@
<ClCompile Include="..\..\..\..\src\prof_sys.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\psset.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\rtree.c">
<Filter>Source Files</Filter>
</ClCompile>

src/psset.c (new file, 239 lines)

@@ -0,0 +1,239 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/psset.h"
#include "jemalloc/internal/flat_bitmap.h"
static const bitmap_info_t psset_bitmap_info =
BITMAP_INFO_INITIALIZER(PSSET_NPSIZES);
void
psset_init(psset_t *psset) {
for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
edata_heap_new(&psset->pageslabs[i]);
}
bitmap_init(psset->bitmap, &psset_bitmap_info, /* fill */ true);
}
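/*
 * Check that a pageslab's cached longest-free-range matches what its in-slab
 * bitmap actually contains.
 */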
JEMALLOC_ALWAYS_INLINE void
psset_assert_ps_consistent(edata_t *ps) {
assert(fb_urange_longest(edata_slab_data_get(ps)->bitmap,
edata_size_get(ps) >> LG_PAGE) == edata_longest_free_range_get(ps));
}
/*
 * Similar to PAC's extent_recycle_extract. Out of all the pageslabs in the
 * set, picks one that can satisfy the allocation and removes it from the set.
 * It scans the first slab of each nonempty bin large enough for the request,
 * and picks the lowest of those in (serial number, address) order.
 */
static edata_t *
psset_recycle_extract(psset_t *psset, size_t size) {
pszind_t ret_ind;
edata_t *ret = NULL;
pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
for (pszind_t i = (pszind_t)bitmap_ffu(psset->bitmap,
&psset_bitmap_info, (size_t)pind);
i < PSSET_NPSIZES;
i = (pszind_t)bitmap_ffu(psset->bitmap, &psset_bitmap_info,
(size_t)i + 1)) {
assert(!edata_heap_empty(&psset->pageslabs[i]));
edata_t *ps = edata_heap_first(&psset->pageslabs[i]);
if (ret == NULL || edata_snad_comp(ps, ret) < 0) {
ret = ps;
ret_ind = i;
}
}
if (ret == NULL) {
return NULL;
}
edata_heap_remove(&psset->pageslabs[ret_ind], ret);
if (edata_heap_empty(&psset->pageslabs[ret_ind])) {
bitmap_set(psset->bitmap, &psset_bitmap_info, ret_ind);
}
psset_assert_ps_consistent(ret);
return ret;
}
static void
psset_insert(psset_t *psset, edata_t *ps, size_t largest_range) {
psset_assert_ps_consistent(ps);
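/*
 * Note the quantization asymmetry: insertion rounds the longest free run
 * down to a size class, while lookup rounds the request up; a slab filed
 * in bin i can therefore always serve a request that maps to bin i.
 */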
pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
largest_range << LG_PAGE));
assert(pind < PSSET_NPSIZES);
if (edata_heap_empty(&psset->pageslabs[pind])) {
bitmap_unset(psset->bitmap, &psset_bitmap_info, (size_t)pind);
}
edata_heap_insert(&psset->pageslabs[pind], ps);
}
/*
* Given a pageslab ps and an edata to hold an allocation of size bytes, carves
* the allocation out of ps, initializes the edata with the resulting range,
* and puts ps back in the set.
*/
static void
psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
size_t size) {
size_t start = 0;
/*
* These initializations are dead stores; without them, though, the compiler
* would warn about potentially-uninitialized use, since it can't tell
* statically that found is always true below.
*/
size_t begin = 0;
size_t len = 0;
fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
size_t npages = size >> LG_PAGE;
size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
size_t largest_unchosen_range = 0;
while (true) {
bool found = fb_urange_iter(ps_fb, ps_npages, start, &begin,
&len);
/*
* A precondition to this function is that ps must be able to
* serve the allocation.
*/
assert(found);
if (len >= npages) {
/*
* We use first-fit within the page slabs; this gives
* bounded worst-case fragmentation within a slab. It's
* not necessarily right; we could experiment with
* various other options.
*/
break;
}
if (len > largest_unchosen_range) {
largest_unchosen_range = len;
}
start = begin + len;
}
uintptr_t addr = (uintptr_t)edata_base_get(ps) + begin * PAGE;
edata_init(r_edata, edata_arena_ind_get(r_edata), (void *)addr, size,
/* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_NOT_HEAD);
edata_ps_set(r_edata, ps);
fb_set_range(ps_fb, ps_npages, begin, npages);
/*
* OK, we've got to put the pageslab back. First we have to figure out
* where, though; we've only checked the run sizes before the run we
* picked. We also need to look at the ones after it. Note
* that we want begin + npages as the start position, not begin + len;
* we might not have used the whole range.
*
* TODO: With a little bit more care, we can guarantee that the longest
* free range field in the edata is accurate upon entry, and avoid doing
* this check in the case where we're allocating from some smaller run.
*/
start = begin + npages;
while (start < ps_npages) {
bool found = fb_urange_iter(ps_fb, ps_npages, start, &begin,
&len);
if (!found) {
break;
}
if (len > largest_unchosen_range) {
largest_unchosen_range = len;
}
start = begin + len;
}
edata_longest_free_range_set(ps, (uint32_t)largest_unchosen_range);
if (largest_unchosen_range != 0) {
psset_insert(psset, ps, largest_unchosen_range);
}
}
bool
psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
edata_t *ps = psset_recycle_extract(psset, size);
if (ps == NULL) {
return true;
}
psset_ps_alloc_insert(psset, ps, r_edata, size);
return false;
}
void
psset_alloc_new(psset_t *psset, edata_t *ps, edata_t *r_edata, size_t size) {
fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
assert(fb_empty(ps_fb, ps_npages));
assert(ps_npages >= (size >> LG_PAGE));
psset_ps_alloc_insert(psset, ps, r_edata, size);
}
edata_t *
psset_dalloc(psset_t *psset, edata_t *edata) {
assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
assert(edata_ps_get(edata) != NULL);
edata_t *ps = edata_ps_get(edata);
fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
size_t ps_old_longest_free_range = edata_longest_free_range_get(ps);
size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
size_t begin =
((uintptr_t)edata_base_get(edata) - (uintptr_t)edata_base_get(ps))
>> LG_PAGE;
size_t len = edata_size_get(edata) >> LG_PAGE;
fb_unset_range(ps_fb, ps_npages, begin, len);
/* We might have just created a new, larger range. */
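/*
 * new_begin is one past the last still-allocated page before the freed
 * range, and new_end is the first still-allocated page after it (or
 * ps_npages if there is none); together they bound the merged free run.
 */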
size_t new_begin = (size_t)(fb_fls(ps_fb, ps_npages, begin) + 1);
size_t new_end = fb_ffs(ps_fb, ps_npages, begin + len - 1);
size_t new_range_len = new_end - new_begin;
/*
* If the new free range is no longer than the previous longest one,
* then the pageslab is non-empty and doesn't need to change bins.
* We're done, and don't need to return a pageslab to evict.
*/
if (new_range_len <= ps_old_longest_free_range) {
return NULL;
}
/*
* Otherwise, it might need to get evicted from the set, or change its
* bin.
*/
edata_longest_free_range_set(ps, (uint32_t)new_range_len);
/*
* If it was previously non-full, then it's in some (possibly now
* incorrect) bin already; remove it.
*
* TODO: We bailed out early above if we didn't expand the longest free
* range, which should avoid a lot of redundant remove/reinserts in the
* same bin. But it doesn't eliminate all of them; it's possible that
* we decreased the longest free range length, but only slightly, and
* not enough to change our pszind. We could check that more precisely.
* (Or, ideally, size class dequantization will happen at some point,
* and the issue becomes moot).
*/
if (ps_old_longest_free_range > 0) {
pszind_t old_pind = sz_psz2ind(sz_psz_quantize_floor(
ps_old_longest_free_range << LG_PAGE));
edata_heap_remove(&psset->pageslabs[old_pind], ps);
if (edata_heap_empty(&psset->pageslabs[old_pind])) {
bitmap_set(psset->bitmap, &psset_bitmap_info,
(size_t)old_pind);
}
}
/* If the pageslab is empty, it gets evicted from the set. */
if (new_range_len == ps_npages) {
return ps;
}
/* Otherwise, it gets reinserted. */
pszind_t new_pind = sz_psz2ind(sz_psz_quantize_floor(
new_range_len << LG_PAGE));
if (edata_heap_empty(&psset->pageslabs[new_pind])) {
bitmap_unset(psset->bitmap, &psset_bitmap_info,
(size_t)new_pind);
}
edata_heap_insert(&psset->pageslabs[new_pind], ps);
return NULL;
}

test/unit/psset.c (new file, 306 lines)

@@ -0,0 +1,306 @@
#include "test/jemalloc_test.h"
#include "jemalloc/internal/psset.h"
#define PAGESLAB_PAGES 64
#define PAGESLAB_SIZE (PAGESLAB_PAGES << LG_PAGE)
#define PAGESLAB_SN 123
#define PAGESLAB_ADDR ((void *)(1234 << LG_PAGE))
#define ALLOC_ARENA_IND 111
#define ALLOC_ESN 222
static void
edata_init_test(edata_t *edata) {
memset(edata, 0, sizeof(*edata));
edata_arena_ind_set(edata, ALLOC_ARENA_IND);
edata_esn_set(edata, ALLOC_ESN);
}
static void
edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) {
/*
* Note that allocations should get the arena ind of their home
* arena, *not* the arena ind of the pageslab allocator.
*/
expect_u_eq(ALLOC_ARENA_IND, edata_arena_ind_get(edata),
"Arena ind changed");
expect_ptr_eq(
(void *)((uintptr_t)PAGESLAB_ADDR + (page_offset << LG_PAGE)),
edata_addr_get(edata), "Didn't allocate in order");
expect_zu_eq(page_cnt << LG_PAGE, edata_size_get(edata), "");
expect_false(edata_slab_get(edata), "");
expect_u_eq(SC_NSIZES, edata_szind_get_maybe_invalid(edata),
"");
expect_zu_eq(0, edata_sn_get(edata), "");
expect_d_eq(edata_state_get(edata), extent_state_active, "");
expect_false(edata_zeroed_get(edata), "");
expect_true(edata_committed_get(edata), "");
expect_d_eq(EXTENT_PAI_HPA, edata_pai_get(edata), "");
expect_false(edata_is_head_get(edata), "");
}
TEST_BEGIN(test_empty) {
bool err;
edata_t pageslab;
memset(&pageslab, 0, sizeof(pageslab));
edata_t alloc;
edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
edata_init_test(&alloc);
psset_t psset;
psset_init(&psset);
/* An empty psset should fail all allocations. */
err = psset_alloc_reuse(&psset, &alloc, PAGE);
expect_true(err, "Empty psset succeeded in an allocation.");
}
TEST_END
TEST_BEGIN(test_fill) {
bool err;
edata_t pageslab;
memset(&pageslab, 0, sizeof(pageslab));
edata_t alloc[PAGESLAB_PAGES];
edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
psset_t psset;
psset_init(&psset);
edata_init_test(&alloc[0]);
psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
edata_init_test(&alloc[i]);
err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Nonempty psset failed page allocation.");
}
for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
edata_t *edata = &alloc[i];
edata_expect(edata, i, 1);
}
/* The pageslab is now full; the psset should have no free space left. */
edata_t extra_alloc;
edata_init_test(&extra_alloc);
err = psset_alloc_reuse(&psset, &extra_alloc, PAGE);
expect_true(err, "Alloc succeeded even though psset should be empty");
}
TEST_END
TEST_BEGIN(test_reuse) {
bool err;
edata_t *ps;
edata_t pageslab;
memset(&pageslab, 0, sizeof(pageslab));
edata_t alloc[PAGESLAB_PAGES];
edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
psset_t psset;
psset_init(&psset);
edata_init_test(&alloc[0]);
psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
edata_init_test(&alloc[i]);
err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Nonempty psset failed page allocation.");
}
/* Free odd indices. */
for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
if (i % 2 == 0) {
continue;
}
ps = psset_dalloc(&psset, &alloc[i]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
}
/* Realloc into them. */
for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
if (i % 2 == 0) {
continue;
}
err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Nonempty psset failed page allocation.");
edata_expect(&alloc[i], i, 1);
}
/* Now, free the pages at indices 0 or 1 mod 4. */
for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
if (i % 4 > 1) {
continue;
}
ps = psset_dalloc(&psset, &alloc[i]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
}
/* And realloc 2-page allocations into them. */
for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
if (i % 4 != 0) {
continue;
}
err = psset_alloc_reuse(&psset, &alloc[i], 2 * PAGE);
expect_false(err, "Nonempty psset failed page allocation.");
edata_expect(&alloc[i], i, 2);
}
/* Free all the 2-page allocations. */
for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
if (i % 4 != 0) {
continue;
}
ps = psset_dalloc(&psset, &alloc[i]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
}
/*
* Free up a 1-page hole next to a 2-page hole, but somewhere in the
* middle of the pageslab. Index 11 should be right before such a hole
* (since 12 % 4 == 0).
*/
size_t index_of_3 = 11;
ps = psset_dalloc(&psset, &alloc[index_of_3]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
err = psset_alloc_reuse(&psset, &alloc[index_of_3], 3 * PAGE);
expect_false(err, "Should have been able to find alloc.");
edata_expect(&alloc[index_of_3], index_of_3, 3);
/* Free up a 4-page hole at the end. */
ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 1]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 2]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
/* Make sure we can satisfy an allocation at the very end of a slab. */
size_t index_of_4 = PAGESLAB_PAGES - 4;
ps = psset_dalloc(&psset, &alloc[index_of_4]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
err = psset_alloc_reuse(&psset, &alloc[index_of_4], 4 * PAGE);
expect_false(err, "Should have been able to find alloc.");
edata_expect(&alloc[index_of_4], index_of_4, 4);
}
TEST_END
TEST_BEGIN(test_evict) {
bool err;
edata_t *ps;
edata_t pageslab;
memset(&pageslab, 0, sizeof(pageslab));
edata_t alloc[PAGESLAB_PAGES];
edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
psset_t psset;
psset_init(&psset);
/* Alloc the whole slab. */
edata_init_test(&alloc[0]);
psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
edata_init_test(&alloc[i]);
err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Unexpected allocation failure");
}
/* Dealloc the whole slab, going forwards. */
for (size_t i = 0; i < PAGESLAB_PAGES - 1; i++) {
ps = psset_dalloc(&psset, &alloc[i]);
expect_ptr_null(ps, "Nonempty pageslab evicted");
}
ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 1]);
expect_ptr_eq(&pageslab, ps, "Empty pageslab not evicted.");
err = psset_alloc_reuse(&psset, &alloc[0], PAGE);
expect_true(err, "psset should be empty.");
}
TEST_END
TEST_BEGIN(test_multi_pageslab) {
bool err;
edata_t *ps;
edata_t pageslab[2];
memset(&pageslab, 0, sizeof(pageslab));
edata_t alloc[2][PAGESLAB_PAGES];
edata_init(&pageslab[0], /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
edata_init(&pageslab[1], /* arena_ind */ 0,
(void *)((uintptr_t)PAGESLAB_ADDR + PAGESLAB_SIZE), PAGESLAB_SIZE,
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
psset_t psset;
psset_init(&psset);
/* Insert both slabs. */
edata_init_test(&alloc[0][0]);
psset_alloc_new(&psset, &pageslab[0], &alloc[0][0], PAGE);
edata_init_test(&alloc[1][0]);
psset_alloc_new(&psset, &pageslab[1], &alloc[1][0], PAGE);
/* Fill them both up; make sure we do so in first-fit order. */
for (size_t i = 0; i < 2; i++) {
for (size_t j = 1; j < PAGESLAB_PAGES; j++) {
edata_init_test(&alloc[i][j]);
err = psset_alloc_reuse(&psset, &alloc[i][j], PAGE);
expect_false(err,
"Nonempty psset failed page allocation.");
assert_ptr_eq(&pageslab[i], edata_ps_get(&alloc[i][j]),
"Didn't pick pageslabs in first-fit");
}
}
/*
* Free up a 2-page hole in the earlier slab, and a 1-page one in the
* later one. We should still pick the earlier slab for a 1-page
* allocation.
*/
ps = psset_dalloc(&psset, &alloc[0][0]);
expect_ptr_null(ps, "Unexpected eviction");
ps = psset_dalloc(&psset, &alloc[0][1]);
expect_ptr_null(ps, "Unexpected eviction");
ps = psset_dalloc(&psset, &alloc[1][0]);
expect_ptr_null(ps, "Unexpected eviction");
err = psset_alloc_reuse(&psset, &alloc[0][0], PAGE);
expect_ptr_eq(&pageslab[0], edata_ps_get(&alloc[0][0]),
"Should have picked first pageslab");
/*
* Now both slabs have 1-page holes. Free up a second one in the later
* slab.
*/
ps = psset_dalloc(&psset, &alloc[1][1]);
expect_ptr_null(ps, "Unexpected eviction");
/*
* We should be able to allocate a 2-page object, even though an earlier
* size class is nonempty.
*/
err = psset_alloc_reuse(&psset, &alloc[1][0], 2 * PAGE);
expect_false(err, "Allocation should have succeeded");
}
TEST_END
int
main(void) {
return test_no_reentrancy(
test_empty,
test_fill,
test_reuse,
test_evict,
test_multi_pageslab);
}