Introduce hpdata_t.

Using an edata_t both for hugepages and the allocations within those hugepages
was convenient at first, but has outlived its usefulness.  Representing
hugepages explicitly, with their own data structure, will make future
development easier.
David Goldblatt 2020-11-17 16:32:45 -08:00 committed by David Goldblatt
parent 4a15008cfb
commit ca30b5db2b
17 changed files with 414 additions and 405 deletions
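
To make the shape of the change concrete: pageslab-only state moves out of
edata_t's union and into the new dedicated type. The sketch below abridges
the structures from the diffs that follow (fields that don't move are
elided):

/* Before: edata_t served double duty (abridged from the old edata_t). */
struct edata_s {
        /* ... ordinary extent fields ... */
        uint64_t age;
        union {
                /* For an allocation living inside a pageslab. */
                edata_t *ps;
                /* For the pageslab itself. */
                struct {
                        uint32_t longest_free_range;
                        bool hugeified;
                };
        };
};

/* After: hugepage metadata gets its own type (abridged from hpdata_t). */
struct hpdata_s {
        void *h_address;        /* hugepage-aligned base address */
        uint64_t h_age;         /* measured in psset/grow operations */
        bool h_huge;
        size_t h_nfree;
        size_t h_longest_free_range;
        fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
};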

Makefile.in

@@ -122,6 +122,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
 	$(srcroot)src/hook.c \
 	$(srcroot)src/hpa.c \
 	$(srcroot)src/hpa_central.c \
+	$(srcroot)src/hpdata.c \
 	$(srcroot)src/inspect.c \
 	$(srcroot)src/large.c \
 	$(srcroot)src/log.c \

include/jemalloc/internal/edata.h

@@ -4,6 +4,7 @@
 #include "jemalloc/internal/atomic.h"
 #include "jemalloc/internal/bin_info.h"
 #include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/hpdata.h"
 #include "jemalloc/internal/nstime.h"
 #include "jemalloc/internal/ph.h"
 #include "jemalloc/internal/ql.h"
@@ -71,7 +72,6 @@ struct edata_map_info_s {
 typedef struct edata_s edata_t;
 typedef ph(edata_t) edata_avail_t;
 typedef ph(edata_t) edata_heap_t;
-typedef ph(edata_t) edata_age_heap_t;

 struct edata_s {
 	/*
 	 * Bitfield containing several fields:
@@ -194,41 +194,13 @@ struct edata_s {
 	};

 	/*
-	 * In some context-specific sense, the age of an active extent.  Each
-	 * context can pick a specific meaning, and share the definition of the
-	 * edata_age_heap_t below.
+	 * If this edata is a user allocation from an HPA, it comes out of some
+	 * pageslab (we don't yet support hugepage allocations that don't fit
+	 * into pageslabs).  This tracks it.
 	 */
-	uint64_t age;
-	union {
-		/*
-		 * We could steal a low bit from these fields to indicate what
-		 * sort of "thing" this is (a page slab, an object within a page
-		 * slab, or a non-pageslab range).  We don't do this yet, but it
-		 * would enable some extra asserts.
-		 */
-		/*
-		 * If this edata is a user allocation from an HPA, it comes out
-		 * of some pageslab (we don't yet support hugepage allocations
-		 * that don't fit into pageslabs).  This tracks it.
-		 */
-		edata_t *ps;
-		/*
-		 * If this edata *is* a pageslab, then we cache some useful
-		 * information about its associated bitmap.
-		 */
-		struct {
-			/*
-			 * The longest free range a pageslab contains determines
-			 * the heap it lives in.  If we know that it didn't
-			 * change after an operation, we can avoid moving it
-			 * between heaps.
-			 */
-			uint32_t longest_free_range;
-			/* Whether or not the slab is backed by a hugepage. */
-			bool hugeified;
-		};
-	};
+	hpdata_t *e_ps;
+	/* Extra field reserved for HPA. */
+	void *e_reserved;

 	union {
 		/*
@@ -330,11 +302,6 @@ edata_pai_get(const edata_t *edata) {
 	    EDATA_BITS_PAI_SHIFT);
 }

-static inline bool
-edata_hugeified_get(const edata_t *edata) {
-	return edata->hugeified;
-}
-
 static inline bool
 edata_slab_get(const edata_t *edata) {
 	return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
@@ -377,21 +344,10 @@ edata_bsize_get(const edata_t *edata) {
 	return edata->e_bsize;
 }

-static inline uint64_t
-edata_age_get(const edata_t *edata) {
-	return edata->age;
-}
-
-static inline edata_t *
+static inline hpdata_t *
 edata_ps_get(const edata_t *edata) {
 	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
-	return edata->ps;
-}
-
-static inline uint32_t
-edata_longest_free_range_get(const edata_t *edata) {
-	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
-	return edata->longest_free_range;
+	return edata->e_ps;
 }

 static inline void *
@@ -477,21 +433,9 @@ edata_bsize_set(edata_t *edata, size_t bsize) {
 }

 static inline void
-edata_age_set(edata_t *edata, uint64_t age) {
-	edata->age = age;
-}
-
-static inline void
-edata_ps_set(edata_t *edata, edata_t *ps) {
-	assert(edata_pai_get(edata) == EXTENT_PAI_HPA || ps == NULL);
-	edata->ps = ps;
-}
-
-static inline void
-edata_longest_free_range_set(edata_t *edata, uint32_t longest_free_range) {
-	assert(edata_pai_get(edata) == EXTENT_PAI_HPA
-	    || longest_free_range == 0);
-	edata->longest_free_range = longest_free_range;
+edata_ps_set(edata_t *edata, hpdata_t *ps) {
+	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+	edata->e_ps = ps;
 }

 static inline void
@@ -566,11 +510,6 @@ edata_pai_set(edata_t *edata, extent_pai_t pai) {
 	    ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
 }

-static inline void
-edata_hugeified_set(edata_t *edata, bool hugeified) {
-	edata->hugeified = hugeified;
-}
-
 static inline void
 edata_slab_set(edata_t *edata, bool slab) {
 	edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
@@ -633,9 +572,6 @@ edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
 	if (config_prof) {
 		edata_prof_tctx_set(edata, NULL);
 	}
-	edata_age_set(edata, 0);
-	edata_ps_set(edata, NULL);
-	edata_longest_free_range_set(edata, 0);
 }

 static inline void
@@ -649,15 +585,12 @@ edata_binit(edata_t *edata, void *addr, size_t bsize, size_t sn) {
 	edata_state_set(edata, extent_state_active);
 	edata_zeroed_set(edata, true);
 	edata_committed_set(edata, true);
-	edata_age_set(edata, 0);
 	/*
 	 * This isn't strictly true, but base allocated extents never get
 	 * deallocated and can't be looked up in the emap, but no sense in
 	 * wasting a state bit to encode this fact.
 	 */
 	edata_pai_set(edata, EXTENT_PAI_PAC);
-	edata_ps_set(edata, NULL);
-	edata_longest_free_range_set(edata, 0);
 }

 static inline int
@@ -718,25 +651,7 @@ edata_esnead_comp(const edata_t *a, const edata_t *b) {
 	return ret;
 }

-static inline int
-edata_age_comp(const edata_t *a, const edata_t *b) {
-	uint64_t a_age = edata_age_get(a);
-	uint64_t b_age = edata_age_get(b);
-	/*
-	 * Equal ages are possible in certain race conditions, like two distinct
-	 * threads simultaneously allocating a new fresh slab without holding a
-	 * bin lock.
-	 */
-	int ret = (a_age > b_age) - (a_age < b_age);
-	if (ret != 0) {
-		return ret;
-	}
-	return edata_snad_comp(a, b);
-}
-
 ph_proto(, edata_avail_, edata_avail_t, edata_t)
 ph_proto(, edata_heap_, edata_heap_t, edata_t)
-ph_proto(, edata_age_heap_, edata_age_heap_t, edata_t);

 #endif /* JEMALLOC_INTERNAL_EDATA_H */

include/jemalloc/internal/hpa.h

@@ -21,6 +21,8 @@ struct hpa_shard_s {
 	pai_t pai;
 	malloc_mutex_t grow_mtx;
 	malloc_mutex_t mtx;
+	/* The base metadata allocator. */
+	base_t *base;
 	/*
 	 * This edata cache is the one we use when allocating a small extent
 	 * from a pageslab.  The pageslab itself comes from the centralized
@@ -45,7 +47,14 @@ struct hpa_shard_s {
 	 *
 	 * Guarded by grow_mtx.
 	 */
-	edata_list_inactive_t unused_slabs;
+	hpdata_list_t unused_slabs;
+
+	/*
+	 * How many grow operations have occurred.
+	 *
+	 * Guarded by grow_mtx.
+	 */
+	uint64_t age_counter;

 	/*
 	 * Either NULL (if empty), or some integer multiple of a
@@ -54,7 +63,8 @@ struct hpa_shard_s {
 	 *
 	 * Guarded by grow_mtx.
 	 */
-	edata_t *eden;
+	void *eden;
+	size_t eden_len;

 	/* The arena ind we're associated with. */
 	unsigned ind;
@@ -67,7 +77,7 @@ struct hpa_shard_s {
  * just that it can function properly given the system it's running on.
  */
 bool hpa_supported();
-bool hpa_shard_init(hpa_shard_t *shard, emap_t *emap,
+bool hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
     edata_cache_t *edata_cache, unsigned ind, size_t alloc_max);
 void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);

include/jemalloc/internal/hpdata.h (new file)

@@ -0,0 +1,124 @@
+#ifndef JEMALLOC_INTERNAL_HPDATA_H
+#define JEMALLOC_INTERNAL_HPDATA_H
+
+#include "jemalloc/internal/flat_bitmap.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/typed_list.h"
+
+/*
+ * The metadata representation we use for extents in hugepages.  While the PAC
+ * uses the edata_t to represent both active and inactive extents, the HP only
+ * uses the edata_t for active ones; instead, inactive extent state is tracked
+ * within the hpdata associated with the enclosing hugepage-sized,
+ * hugepage-aligned region of virtual address space.
+ *
+ * An hpdata need not be "truly" backed by a hugepage (which is not necessarily
+ * an observable property of any given region of address space).  It's just
+ * hugepage-sized and hugepage-aligned; it's *potentially* huge.
+ */
+typedef struct hpdata_s hpdata_t;
+struct hpdata_s {
+	/*
+	 * We likewise follow the edata convention of mangling names and forcing
+	 * the use of accessors -- this lets us add some consistency checks on
+	 * access.
+	 */
+
+	/*
+	 * The address of the hugepage in question.  This can't be named h_addr,
+	 * since that conflicts with a macro defined in Windows headers.
+	 */
+	void *h_address;
+	/* Its age (measured in psset operations). */
+	uint64_t h_age;
+	/* Whether or not we think the hugepage is mapped that way by the OS. */
+	bool h_huge;
+	union {
+		/* When nonempty, used by the psset bins. */
+		phn(hpdata_t) ph_link;
+		/*
+		 * When empty (or not corresponding to any hugepage), list
+		 * linkage.
+		 */
+		ql_elm(hpdata_t) ql_link;
+	};
+
+	/* Number of currently free pages (regardless of contiguity). */
+	size_t h_nfree;
+	/* The length of the largest contiguous sequence of inactive pages. */
+	size_t h_longest_free_range;
+	/* A bitmap with bits set in the active pages. */
+	fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
+};
+
+static inline void *
+hpdata_addr_get(const hpdata_t *hpdata) {
+	return hpdata->h_address;
+}
+
+static inline void
+hpdata_addr_set(hpdata_t *hpdata, void *addr) {
+	assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+	hpdata->h_address = addr;
+}
+
+static inline uint64_t
+hpdata_age_get(const hpdata_t *hpdata) {
+	return hpdata->h_age;
+}
+
+static inline void
+hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
+	hpdata->h_age = age;
+}
+
+static inline bool
+hpdata_huge_get(const hpdata_t *hpdata) {
+	return hpdata->h_huge;
+}
+
+static inline void
+hpdata_huge_set(hpdata_t *hpdata, bool huge) {
+	hpdata->h_huge = huge;
+}
+
+static inline size_t
+hpdata_nfree_get(const hpdata_t *hpdata) {
+	return hpdata->h_nfree;
+}
+
+static inline void
+hpdata_nfree_set(hpdata_t *hpdata, size_t nfree) {
+	assert(nfree <= HUGEPAGE_PAGES);
+	hpdata->h_nfree = nfree;
+}
+
+static inline size_t
+hpdata_longest_free_range_get(const hpdata_t *hpdata) {
+	return hpdata->h_longest_free_range;
+}
+
+static inline void
+hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
+	assert(longest_free_range <= HUGEPAGE_PAGES);
+	hpdata->h_longest_free_range = longest_free_range;
+}
+
+static inline void
+hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
+	hpdata_addr_set(hpdata, addr);
+	hpdata_age_set(hpdata, age);
+	hpdata_huge_set(hpdata, false);
+	hpdata_nfree_set(hpdata, HUGEPAGE_PAGES);
+	hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES);
+	fb_init(hpdata->active_pages, HUGEPAGE_PAGES);
+}
+
+TYPED_LIST(hpdata_list, hpdata_t, ql_link)
+
+typedef ph(hpdata_t) hpdata_age_heap_t;
+ph_proto(, hpdata_age_heap_, hpdata_age_heap_t, hpdata_t);
+
+#endif /* JEMALLOC_INTERNAL_HPDATA_H */
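
A minimal usage sketch of the accessors above (illustration only, not part of
the diff; the address must be hugepage-aligned, since hpdata_addr_set()
asserts HUGEPAGE_ADDR2BASE(addr) == addr, and HUGEPAGE_PAGES comes from
pages.h below):

#include "jemalloc/internal/hpdata.h"

static void
hpdata_usage_sketch(void *hugepage_addr) {
        hpdata_t hp;
        /* A fresh hpdata starts non-huge, fully free, bitmap cleared. */
        hpdata_init(&hp, hugepage_addr, /* age */ 0);
        assert(hpdata_nfree_get(&hp) == HUGEPAGE_PAGES);
        assert(hpdata_longest_free_range_get(&hp) == HUGEPAGE_PAGES);
        assert(!hpdata_huge_get(&hp));

        /* Consumers account for handed-out pages explicitly. */
        hpdata_nfree_set(&hp, hpdata_nfree_get(&hp) - 1);
}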

include/jemalloc/internal/pages.h

@@ -17,6 +17,20 @@
 /* Huge page size.  LG_HUGEPAGE is determined by the configure script. */
 #define HUGEPAGE	((size_t)(1U << LG_HUGEPAGE))
 #define HUGEPAGE_MASK	((size_t)(HUGEPAGE - 1))
+
+#if LG_HUGEPAGE != 0
+#	define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
+#else
+/*
+ * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths.  If
+ * we can't autodetect the hugepage size, it gets treated as 0, in which case
+ * we'll trigger a compiler error in those arrays.  Avoid this case by ensuring
+ * that this value is at least 1.  (We won't ever run in this degraded state;
+ * hpa_supported() returns false in this case.)
+ */
+#	define HUGEPAGE_PAGES 1
+#endif
+
 /* Return the huge page base address for the huge page containing address a. */
 #define HUGEPAGE_ADDR2BASE(a)						\
 	((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
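
For concreteness, under the common x86-64 configuration (an assumption for
illustration; LG_HUGEPAGE is really set by the configure script): with 2 MiB
hugepages and 4 KiB pages,

        LG_HUGEPAGE    = 21
        HUGEPAGE       = 1 << 21 = 2097152 bytes
        HUGEPAGE_PAGES = HUGEPAGE / PAGE = 2097152 / 4096 = 512

so the hpdata_t bitmap above tracks 512 pages per slab.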

include/jemalloc/internal/psset.h

@@ -1,6 +1,8 @@
 #ifndef JEMALLOC_INTERNAL_PSSET_H
 #define JEMALLOC_INTERNAL_PSSET_H

+#include "jemalloc/internal/hpdata.h"
+
 /*
  * A page-slab set.  What the eset is to PAC, the psset is to HPA.  It maintains
  * a collection of page-slabs (the intent being that they are backed by
@@ -51,21 +53,18 @@ struct psset_s {
 	 * The pageslabs, quantized by the size class of the largest contiguous
 	 * free run of pages in a pageslab.
 	 */
-	edata_age_heap_t pageslabs[PSSET_NPSIZES];
+	hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
 	bitmap_t bitmap[BITMAP_GROUPS(PSSET_NPSIZES)];
 	psset_stats_t stats;
-
-	/* How many alloc_new calls have happened? */
-	uint64_t age_counter;
 };

 void psset_init(psset_t *psset);
 void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);

-void psset_insert(psset_t *psset, edata_t *ps);
-void psset_remove(psset_t *psset, edata_t *ps);
-void psset_hugify(psset_t *psset, edata_t *ps);
+void psset_insert(psset_t *psset, hpdata_t *ps);
+void psset_remove(psset_t *psset, hpdata_t *ps);
+void psset_hugify(psset_t *psset, hpdata_t *ps);

 /*
  * Tries to obtain a chunk from an existing pageslab already in the set.
@@ -78,7 +77,7 @@ bool psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size);
  * to the psset and allocate an extent from within it.  The passed-in pageslab
  * must be at least as big as size.
  */
-void psset_alloc_new(psset_t *psset, edata_t *ps,
+void psset_alloc_new(psset_t *psset, hpdata_t *ps,
     edata_t *r_edata, size_t size);

@@ -89,6 +88,6 @@ void psset_alloc_new(psset_t *psset, hpdata_t *ps,
  * result must be checked and deallocated to the central HPA.  Otherwise returns
  * NULL.
  */
-edata_t *psset_dalloc(psset_t *psset, edata_t *edata);
+hpdata_t *psset_dalloc(psset_t *psset, edata_t *edata);

 #endif /* JEMALLOC_INTERNAL_PSSET_H */

projects/vc2015/jemalloc/jemalloc.vcxproj

@@ -62,6 +62,7 @@
     <ClCompile Include="..\..\..\..\src\hook.c" />
     <ClCompile Include="..\..\..\..\src\hpa.c" />
     <ClCompile Include="..\..\..\..\src\hpa_central.c" />
+    <ClCompile Include="..\..\..\..\src\hpdata.c" />
     <ClCompile Include="..\..\..\..\src\inspect.c" />
     <ClCompile Include="..\..\..\..\src\jemalloc.c" />
     <ClCompile Include="..\..\..\..\src\large.c" />

projects/vc2015/jemalloc/jemalloc.vcxproj.filters

@@ -70,6 +70,9 @@
     <ClCompile Include="..\..\..\..\src\hpa_central.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\hpdata.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\inspect.c">
       <Filter>Source Files</Filter>
     </ClCompile>

projects/vc2017/jemalloc/jemalloc.vcxproj

@@ -62,6 +62,7 @@
     <ClCompile Include="..\..\..\..\src\hook.c" />
     <ClCompile Include="..\..\..\..\src\hpa.c" />
     <ClCompile Include="..\..\..\..\src\hpa_central.c" />
+    <ClCompile Include="..\..\..\..\src\hpdata.c" />
     <ClCompile Include="..\..\..\..\src\inspect.c" />
     <ClCompile Include="..\..\..\..\src\jemalloc.c" />
     <ClCompile Include="..\..\..\..\src\large.c" />

projects/vc2017/jemalloc/jemalloc.vcxproj.filters

@@ -70,6 +70,9 @@
     <ClCompile Include="..\..\..\..\src\hpa_central.c">
       <Filter>Source Files</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\..\src\hpdata.c">
+      <Filter>Source Files</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\..\src\inspect.c">
       <Filter>Source Files</Filter>
     </ClCompile>

src/edata.c

@@ -4,4 +4,3 @@
 ph_gen(, edata_avail_, edata_avail_t, edata_t, ph_link,
     edata_esnead_comp)
 ph_gen(, edata_heap_, edata_heap_t, edata_t, ph_link, edata_snad_comp)
-ph_gen(, edata_age_heap_, edata_age_heap_t, edata_t, ph_link, edata_age_comp)

src/hpa.c

@@ -33,22 +33,22 @@ hpa_supported() {
 	 * We fundamentally rely on an address-space-hungry growth strategy for
 	 * hugepages.
 	 */
-	if (LG_SIZEOF_PTR == 2) {
+	if (LG_SIZEOF_PTR != 3) {
 		return false;
 	}
 	/*
-	 * We use the edata bitmap; it needs to have at least as many bits as a
-	 * hugepage has pages.
+	 * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
+	 * this sentinel value -- see the comment in pages.h.
 	 */
-	if (HUGEPAGE / PAGE > BITMAP_GROUPS_MAX * sizeof(bitmap_t) * 8) {
+	if (HUGEPAGE_PAGES == 1) {
 		return false;
 	}
 	return true;
 }

 bool
-hpa_shard_init(hpa_shard_t *shard, emap_t *emap, edata_cache_t *edata_cache,
-    unsigned ind, size_t alloc_max) {
+hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
+    edata_cache_t *edata_cache, unsigned ind, size_t alloc_max) {
 	/* malloc_conf processing should have filtered out these cases. */
 	assert(hpa_supported());
 	bool err;
@@ -64,11 +64,14 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
 	}
 	assert(edata_cache != NULL);

+	shard->base = base;
 	edata_cache_small_init(&shard->ecs, edata_cache);
 	psset_init(&shard->psset);
 	shard->alloc_max = alloc_max;
-	edata_list_inactive_init(&shard->unused_slabs);
+	hpdata_list_init(&shard->unused_slabs);
+	shard->age_counter = 0;
 	shard->eden = NULL;
+	shard->eden_len = 0;
 	shard->ind = ind;
 	shard->emap = emap;
@@ -104,22 +107,27 @@ hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
 	malloc_mutex_unlock(tsdn, &shard->mtx);
 }

+static hpdata_t *
+hpa_alloc_ps(tsdn_t *tsdn, hpa_shard_t *shard) {
+	return (hpdata_t *)base_alloc(tsdn, shard->base, sizeof(hpdata_t),
+	    CACHELINE);
+}
+
 static bool
-hpa_should_hugify(hpa_shard_t *shard, edata_t *ps) {
+hpa_should_hugify(hpa_shard_t *shard, hpdata_t *ps) {
 	/*
 	 * For now, just use a static check; hugify a page if it's <= 5%
 	 * inactive.  Eventually, this should be a malloc conf option.
 	 */
-	return !edata_hugeified_get(ps)
-	    && edata_nfree_get(ps) < (HUGEPAGE / PAGE) * 5 / 100;
+	return !hpdata_huge_get(ps)
+	    && hpdata_nfree_get(ps) < (HUGEPAGE / PAGE) * 5 / 100;
 }

 /* Returns true on error. */
 static void
-hpa_hugify(edata_t *ps) {
-	assert(edata_size_get(ps) == HUGEPAGE);
-	assert(edata_hugeified_get(ps));
-	bool err = pages_huge(edata_base_get(ps), HUGEPAGE);
+hpa_hugify(hpdata_t *ps) {
+	assert(hpdata_huge_get(ps));
+	bool err = pages_huge(hpdata_addr_get(ps), HUGEPAGE);
 	/*
 	 * Eat the error; even if the hugeification failed, it's still safe to
 	 * pretend it didn't (and would require extraordinary measures to
@@ -129,30 +137,36 @@ hpa_hugify(hpdata_t *ps) {
 }

 static void
-hpa_dehugify(edata_t *ps) {
+hpa_dehugify(hpdata_t *ps) {
 	/* Purge, then dehugify while unbacked. */
-	pages_purge_forced(edata_addr_get(ps), HUGEPAGE);
-	pages_nohuge(edata_addr_get(ps), HUGEPAGE);
-	edata_hugeified_set(ps, false);
+	pages_purge_forced(hpdata_addr_get(ps), HUGEPAGE);
+	pages_nohuge(hpdata_addr_get(ps), HUGEPAGE);
+	hpdata_huge_set(ps, false);
 }

-static edata_t *
+static hpdata_t *
 hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
 	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
-	edata_t *ps = NULL;
+	hpdata_t *ps = NULL;

 	/* Is there address space waiting for reuse? */
 	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
-	ps = edata_list_inactive_first(&shard->unused_slabs);
+	ps = hpdata_list_first(&shard->unused_slabs);
 	if (ps != NULL) {
-		edata_list_inactive_remove(&shard->unused_slabs, ps);
+		hpdata_list_remove(&shard->unused_slabs, ps);
+		hpdata_age_set(ps, shard->age_counter++);
 		return ps;
 	}

 	/* Is eden a perfect fit? */
-	if (shard->eden != NULL && edata_size_get(shard->eden) == HUGEPAGE) {
-		ps = shard->eden;
+	if (shard->eden != NULL && shard->eden_len == HUGEPAGE) {
+		ps = hpa_alloc_ps(tsdn, shard);
+		if (ps == NULL) {
+			return NULL;
+		}
+		hpdata_init(ps, shard->eden, shard->age_counter++);
 		shard->eden = NULL;
+		shard->eden_len = 0;
 		return ps;
 	}
@@ -173,78 +187,32 @@ hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
 		if (new_eden == NULL) {
 			return NULL;
 		}
-		malloc_mutex_lock(tsdn, &shard->mtx);
-		/* Allocate ps edata, bailing if we fail. */
-		ps = edata_cache_small_get(tsdn, &shard->ecs);
+		ps = hpa_alloc_ps(tsdn, shard);
 		if (ps == NULL) {
-			malloc_mutex_unlock(tsdn, &shard->mtx);
 			pages_unmap(new_eden, HPA_EDEN_SIZE);
 			return NULL;
 		}
-		/* Allocate eden edata, bailing if we fail. */
-		shard->eden = edata_cache_small_get(tsdn, &shard->ecs);
-		if (shard->eden == NULL) {
-			edata_cache_small_put(tsdn, &shard->ecs, ps);
-			malloc_mutex_unlock(tsdn, &shard->mtx);
-			pages_unmap(new_eden, HPA_EDEN_SIZE);
-			return NULL;
-		}
-		/* Success. */
-		malloc_mutex_unlock(tsdn, &shard->mtx);
-
-		/*
-		 * Note that the values here don't really make sense (e.g. eden
-		 * is actually zeroed).  But we don't use the slab metadata in
-		 * determining subsequent allocation metadata (e.g. zero
-		 * tracking should be done at the per-page level, not at the
-		 * level of the hugepage).  It's just a convenient data
-		 * structure that contains much of the helpers we need (defined
-		 * lists, a bitmap, an address field, etc.).  Eventually, we'll
-		 * have a "real" representation of a hugepage that's unconnected
-		 * to the edata_ts it will serve allocations into.
-		 */
-		edata_init(shard->eden, shard->ind, new_eden, HPA_EDEN_SIZE,
-		    /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_dirty,
-		    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
-		    /* is_head */ true);
-		edata_hugeified_set(shard->eden, false);
+		shard->eden = new_eden;
+		shard->eden_len = HPA_EDEN_SIZE;
 	} else {
 		/* Eden is already nonempty; only need an edata for ps. */
-		malloc_mutex_lock(tsdn, &shard->mtx);
-		ps = edata_cache_small_get(tsdn, &shard->ecs);
-		malloc_mutex_unlock(tsdn, &shard->mtx);
+		ps = hpa_alloc_ps(tsdn, shard);
 		if (ps == NULL) {
 			return NULL;
 		}
 	}
-	/*
-	 * We should have dropped mtx since we're not touching ecs any more, but
-	 * we should continue to hold the grow mutex, since we're about to touch
-	 * eden.
-	 */
-	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
-	malloc_mutex_assert_owner(tsdn, &shard->grow_mtx);
+	assert(ps != NULL);
 	assert(shard->eden != NULL);
-	assert(edata_size_get(shard->eden) > HUGEPAGE);
-	assert(edata_size_get(shard->eden) % HUGEPAGE == 0);
-	assert(edata_addr_get(shard->eden)
-	    == HUGEPAGE_ADDR2BASE(edata_addr_get(shard->eden)));
+	assert(shard->eden_len > HUGEPAGE);
+	assert(shard->eden_len % HUGEPAGE == 0);
+	assert(HUGEPAGE_ADDR2BASE(shard->eden) == shard->eden);

-	malloc_mutex_lock(tsdn, &shard->mtx);
-	ps = edata_cache_small_get(tsdn, &shard->ecs);
-	malloc_mutex_unlock(tsdn, &shard->mtx);
-	if (ps == NULL) {
-		return NULL;
-	}
-	edata_init(ps, edata_arena_ind_get(shard->eden),
-	    edata_addr_get(shard->eden), HUGEPAGE, /* slab */ false,
-	    /* szind */ SC_NSIZES, /* sn */ 0, extent_state_dirty,
-	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
-	    /* is_head */ true);
-	edata_hugeified_set(ps, false);
-	edata_addr_set(shard->eden, edata_past_get(ps));
-	edata_size_set(shard->eden,
-	    edata_size_get(shard->eden) - HUGEPAGE);
+	hpdata_init(ps, shard->eden, shard->age_counter++);
+
+	char *eden_char = (char *)shard->eden;
+	eden_char += HUGEPAGE;
+	shard->eden = (void *)eden_char;
+	shard->eden_len -= HUGEPAGE;

 	return ps;
 }
@@ -255,7 +223,7 @@ hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
  * their address space in a list outside the psset.
  */
 static void
-hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *ps) {
+hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
 	/*
 	 * We do relatively expensive system calls.  The ps was evicted, so no
 	 * one should touch it while we're also touching it.
@@ -263,9 +231,6 @@ hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
 	malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
 	malloc_mutex_assert_not_owner(tsdn, &shard->grow_mtx);

-	assert(edata_size_get(ps) == HUGEPAGE);
-	assert(HUGEPAGE_ADDR2BASE(edata_addr_get(ps)) == edata_addr_get(ps));
-
 	/*
 	 * We do this unconditionally, even for pages which were not originally
 	 * hugeified; it has the same effect.
@@ -273,7 +238,7 @@ hpa_handle_ps_eviction(tsdn_t *tsdn, hpa_shard_t *shard, hpdata_t *ps) {
 	hpa_dehugify(ps);

 	malloc_mutex_lock(tsdn, &shard->grow_mtx);
-	edata_list_inactive_prepend(&shard->unused_slabs, ps);
+	hpdata_list_prepend(&shard->unused_slabs, ps);
 	malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 }
@@ -307,7 +272,7 @@ hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom)
 	err = emap_register_boundary(tsdn, shard->emap, edata,
 	    SC_NSIZES, /* slab */ false);
 	if (err) {
-		edata_t *ps = psset_dalloc(&shard->psset, edata);
+		hpdata_t *ps = psset_dalloc(&shard->psset, edata);
 		/*
 		 * The pageslab was nonempty before we started; it
 		 * should still be nonempty now, and so shouldn't get
@@ -320,7 +285,7 @@ hpa_try_alloc_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size, bool *oom)
 		return NULL;
 	}

-	edata_t *ps = edata_ps_get(edata);
+	hpdata_t *ps = edata_ps_get(edata);
 	assert(ps != NULL);
 	bool hugify = hpa_should_hugify(shard, ps);
 	if (hugify) {
@@ -378,16 +343,11 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
 	 * deallocations (and allocations of smaller sizes) may still succeed
 	 * while we're doing this potentially expensive system call.
 	 */
-	edata_t *grow_edata = hpa_grow(tsdn, shard);
-	if (grow_edata == NULL) {
+	hpdata_t *grow_ps = hpa_grow(tsdn, shard);
+	if (grow_ps == NULL) {
 		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
 		return NULL;
 	}
-	assert(edata_arena_ind_get(grow_edata) == shard->ind);
-
-	edata_slab_set(grow_edata, true);
-	fb_group_t *fb = edata_slab_data_get(grow_edata)->bitmap;
-	fb_init(fb, HUGEPAGE / PAGE);

 	/* We got the new edata; allocate from it. */
 	malloc_mutex_lock(tsdn, &shard->mtx);
@@ -395,18 +355,19 @@ hpa_alloc_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size) {
 	if (edata == NULL) {
 		malloc_mutex_unlock(tsdn, &shard->mtx);
 		malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+		hpa_handle_ps_eviction(tsdn, shard, grow_ps);
 		return NULL;
 	}
-	psset_alloc_new(&shard->psset, grow_edata, edata, size);
+	psset_alloc_new(&shard->psset, grow_ps, edata, size);
 	err = emap_register_boundary(tsdn, shard->emap, edata,
 	    SC_NSIZES, /* slab */ false);
 	if (err) {
-		edata_t *ps = psset_dalloc(&shard->psset, edata);
+		hpdata_t *ps = psset_dalloc(&shard->psset, edata);
 		/*
 		 * The pageslab was empty except for the new allocation; it
 		 * should get evicted.
 		 */
-		assert(ps == grow_edata);
+		assert(ps == grow_ps);
 		edata_cache_small_put(tsdn, &shard->ecs, edata);
 		/*
 		 * Technically the same as fallthrough at the time of this
@@ -496,7 +457,7 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
 	assert(edata_committed_get(edata));
 	assert(edata_base_get(edata) != NULL);

-	edata_t *ps = edata_ps_get(edata);
+	hpdata_t *ps = edata_ps_get(edata);
 	/* Currently, all edatas come from pageslabs. */
 	assert(ps != NULL);
 	emap_deregister_boundary(tsdn, shard->emap, edata);
@@ -506,7 +467,7 @@ hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata) {
 	 * Page slabs can move between pssets (and have their hugeified status
 	 * change) in racy ways.
 	 */
-	edata_t *evicted_ps = psset_dalloc(&shard->psset, edata);
+	hpdata_t *evicted_ps = psset_dalloc(&shard->psset, edata);
 	/*
 	 * If a pageslab became empty because of the dalloc, it better have been
 	 * the one we expected.
@@ -562,11 +523,10 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
 		hpa_assert_empty(tsdn, shard, &shard->psset);
 		malloc_mutex_unlock(tsdn, &shard->mtx);
 	}
-	edata_t *ps;
-	while ((ps = edata_list_inactive_first(&shard->unused_slabs)) != NULL) {
-		assert(edata_size_get(ps) == HUGEPAGE);
-		edata_list_inactive_remove(&shard->unused_slabs, ps);
-		pages_unmap(edata_base_get(ps), HUGEPAGE);
+	hpdata_t *ps;
+	while ((ps = hpdata_list_first(&shard->unused_slabs)) != NULL) {
+		hpdata_list_remove(&shard->unused_slabs, ps);
+		pages_unmap(hpdata_addr_get(ps), HUGEPAGE);
 	}
 }
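
Two bits of arithmetic in hpa.c, spelled out (values assume 2 MiB hugepages
and 4 KiB pages; illustration only). hpa_grow() carves eden one hugepage at a
time -- eden advances by HUGEPAGE and eden_len shrinks by HUGEPAGE per grow,
so a fresh HPA_EDEN_SIZE mapping serves HPA_EDEN_SIZE / HUGEPAGE grows. And
the hugify threshold works out to:

static bool
hpa_should_hugify_sketch(const hpdata_t *ps) {
        /*
         * (HUGEPAGE / PAGE) * 5 / 100 == 512 * 5 / 100 == 25, so a slab
         * with fewer than 25 free pages (>= 488 active, i.e. <= ~5%
         * inactive) becomes a candidate for pages_huge().
         */
        return !hpdata_huge_get(ps)
            && hpdata_nfree_get(ps) < (HUGEPAGE / PAGE) * 5 / 100;
}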

src/hpdata.c (new file)

@@ -0,0 +1,18 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpdata.h"
+
+static int
+hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
+	uint64_t a_age = hpdata_age_get(a);
+	uint64_t b_age = hpdata_age_get(b);
+	/*
+	 * hpdata ages are operation counts in the psset; no two should be the
+	 * same.
+	 */
+	assert(a_age != b_age);
+	return (a_age > b_age) - (a_age < b_age);
+}
+
+ph_gen(, hpdata_age_heap_, hpdata_age_heap_t, hpdata_t, ph_link, hpdata_age_comp)
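
The ph_gen() above expands into a full pairing-heap implementation keyed by
hpdata_age_comp; the generated names are the ones the psset code below relies
on. In sketch form (illustrative only; not part of the diff):

static void
age_heap_sketch(hpdata_t *ps_a, hpdata_t *ps_b) {
        hpdata_age_heap_t heap;
        hpdata_age_heap_new(&heap);
        hpdata_age_heap_insert(&heap, ps_a);
        hpdata_age_heap_insert(&heap, ps_b);
        /* first() yields the minimum age, i.e. the oldest pageslab. */
        hpdata_t *oldest = hpdata_age_heap_first(&heap);
        assert(oldest == (hpdata_age_get(ps_a) < hpdata_age_get(ps_b)
            ? ps_a : ps_b));
}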

src/pa.c

@@ -51,8 +51,8 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
 bool
 pa_shard_enable_hpa(pa_shard_t *shard, size_t alloc_max, size_t sec_nshards,
     size_t sec_alloc_max, size_t sec_bytes_max) {
-	if (hpa_shard_init(&shard->hpa_shard, shard->emap, &shard->edata_cache,
-	    shard->ind, alloc_max)) {
+	if (hpa_shard_init(&shard->hpa_shard, shard->emap, shard->base,
+	    &shard->edata_cache, shard->ind, alloc_max)) {
 		return true;
 	}
 	if (sec_init(&shard->hpa_sec, &shard->hpa_shard.pai, sec_nshards,

src/psset.c

@@ -11,11 +11,10 @@ static const bitmap_info_t psset_bitmap_info =
 void
 psset_init(psset_t *psset) {
 	for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
-		edata_age_heap_new(&psset->pageslabs[i]);
+		hpdata_age_heap_new(&psset->pageslabs[i]);
 	}
 	bitmap_init(psset->bitmap, &psset_bitmap_info, /* fill */ true);
 	memset(&psset->stats, 0, sizeof(psset->stats));
-	psset->age_counter = 0;
 }

 static void
@@ -49,18 +48,17 @@ psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
  * ensure we don't miss any heap modification operations.
  */
 JEMALLOC_ALWAYS_INLINE void
-psset_bin_stats_insert_remove(psset_bin_stats_t *binstats, edata_t *ps,
+psset_bin_stats_insert_remove(psset_bin_stats_t *binstats, hpdata_t *ps,
     bool insert) {
-	size_t *npageslabs_dst = edata_hugeified_get(ps)
+	size_t *npageslabs_dst = hpdata_huge_get(ps)
 	    ? &binstats->npageslabs_huge : &binstats->npageslabs_nonhuge;
-	size_t *nactive_dst = edata_hugeified_get(ps)
+	size_t *nactive_dst = hpdata_huge_get(ps)
 	    ? &binstats->nactive_huge : &binstats->nactive_nonhuge;
-	size_t *ninactive_dst = edata_hugeified_get(ps)
+	size_t *ninactive_dst = hpdata_huge_get(ps)
 	    ? &binstats->ninactive_huge : &binstats->ninactive_nonhuge;

-	size_t npages = edata_size_get(ps) >> LG_PAGE;
-	size_t ninactive = edata_nfree_get(ps);
-	size_t nactive = npages - ninactive;
+	size_t ninactive = hpdata_nfree_get(ps);
+	size_t nactive = HUGEPAGE_PAGES - ninactive;

 	size_t mul = insert ? (size_t)1 : (size_t)-1;
 	*npageslabs_dst += mul * 1;
@@ -69,12 +67,12 @@ psset_bin_stats_insert_remove(psset_bin_stats_t *binstats, hpdata_t *ps,
 }

 static void
-psset_bin_stats_insert(psset_bin_stats_t *binstats, edata_t *ps) {
+psset_bin_stats_insert(psset_bin_stats_t *binstats, hpdata_t *ps) {
 	psset_bin_stats_insert_remove(binstats, ps, /* insert */ true);
 }

 static void
-psset_bin_stats_remove(psset_bin_stats_t *binstats, edata_t *ps) {
+psset_bin_stats_remove(psset_bin_stats_t *binstats, hpdata_t *ps) {
 	psset_bin_stats_insert_remove(binstats, ps, /* insert */ false);
 }
@@ -96,27 +94,27 @@ psset_bin_stats_deactivate(psset_bin_stats_t *binstats, bool huge, size_t num) {
 }

 static void
-psset_edata_heap_remove(psset_t *psset, pszind_t pind, edata_t *ps) {
-	edata_age_heap_remove(&psset->pageslabs[pind], ps);
+psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+	hpdata_age_heap_remove(&psset->pageslabs[pind], ps);
 	psset_bin_stats_remove(&psset->stats.nonfull_slabs[pind], ps);
 }

 static void
-psset_edata_heap_insert(psset_t *psset, pszind_t pind, edata_t *ps) {
-	edata_age_heap_insert(&psset->pageslabs[pind], ps);
+psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+	hpdata_age_heap_insert(&psset->pageslabs[pind], ps);
 	psset_bin_stats_insert(&psset->stats.nonfull_slabs[pind], ps);
 }

 JEMALLOC_ALWAYS_INLINE void
-psset_assert_ps_consistent(edata_t *ps) {
-	assert(fb_urange_longest(edata_slab_data_get(ps)->bitmap,
-	    edata_size_get(ps) >> LG_PAGE) == edata_longest_free_range_get(ps));
+psset_assert_ps_consistent(hpdata_t *ps) {
+	assert(fb_urange_longest(ps->active_pages, HUGEPAGE_PAGES)
+	    == hpdata_longest_free_range_get(ps));
 }

 void
-psset_insert(psset_t *psset, edata_t *ps) {
+psset_insert(psset_t *psset, hpdata_t *ps) {
 	psset_assert_ps_consistent(ps);
-	size_t longest_free_range = edata_longest_free_range_get(ps);
+	size_t longest_free_range = hpdata_longest_free_range_get(ps);

 	if (longest_free_range == 0) {
 		/*
@@ -131,16 +129,16 @@ psset_insert(psset_t *psset, hpdata_t *ps) {
 	    longest_free_range << LG_PAGE));
 	assert(pind < PSSET_NPSIZES);

-	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
+	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
 		bitmap_unset(psset->bitmap, &psset_bitmap_info, (size_t)pind);
 	}
-	psset_edata_heap_insert(psset, pind, ps);
+	psset_hpdata_heap_insert(psset, pind, ps);
 }

 void
-psset_remove(psset_t *psset, edata_t *ps) {
+psset_remove(psset_t *psset, hpdata_t *ps) {
 	psset_assert_ps_consistent(ps);
-	size_t longest_free_range = edata_longest_free_range_get(ps);
+	size_t longest_free_range = hpdata_longest_free_range_get(ps);

 	if (longest_free_range == 0) {
 		psset_bin_stats_remove(&psset->stats.full_slabs, ps);
@@ -150,18 +148,18 @@ psset_remove(psset_t *psset, hpdata_t *ps) {
 	pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
 	    longest_free_range << LG_PAGE));
 	assert(pind < PSSET_NPSIZES);
-	psset_edata_heap_remove(psset, pind, ps);
-	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
+	psset_hpdata_heap_remove(psset, pind, ps);
+	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
 		bitmap_set(psset->bitmap, &psset_bitmap_info, (size_t)pind);
 	}
 }

 void
-psset_hugify(psset_t *psset, edata_t *ps) {
-	assert(!edata_hugeified_get(ps));
+psset_hugify(psset_t *psset, hpdata_t *ps) {
+	assert(!hpdata_huge_get(ps));
 	psset_assert_ps_consistent(ps);

-	size_t longest_free_range = edata_longest_free_range_get(ps);
+	size_t longest_free_range = hpdata_longest_free_range_get(ps);
 	psset_bin_stats_t *bin_stats;
 	if (longest_free_range == 0) {
 		bin_stats = &psset->stats.full_slabs;
@@ -172,7 +170,7 @@ psset_hugify(psset_t *psset, hpdata_t *ps) {
 		bin_stats = &psset->stats.nonfull_slabs[pind];
 	}
 	psset_bin_stats_remove(bin_stats, ps);
-	edata_hugeified_set(ps, true);
+	hpdata_huge_set(ps, true);
 	psset_bin_stats_insert(bin_stats, ps);
 }
@@ -180,7 +178,7 @@ psset_hugify(psset_t *psset, hpdata_t *ps) {
  * Similar to PAC's extent_recycle_extract.  Out of all the pageslabs in the
  * set, picks one that can satisfy the allocation and remove it from the set.
  */
-static edata_t *
+static hpdata_t *
 psset_recycle_extract(psset_t *psset, size_t size) {
 	pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
 	pszind_t pind = (pszind_t)bitmap_ffu(psset->bitmap, &psset_bitmap_info,
@@ -188,13 +186,13 @@ psset_recycle_extract(psset_t *psset, size_t size) {
 	if (pind == PSSET_NPSIZES) {
 		return NULL;
 	}
-	edata_t *ps = edata_age_heap_first(&psset->pageslabs[pind]);
+	hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]);
 	if (ps == NULL) {
 		return NULL;
 	}

-	psset_edata_heap_remove(psset, pind, ps);
-	if (edata_age_heap_empty(&psset->pageslabs[pind])) {
+	psset_hpdata_heap_remove(psset, pind, ps);
+	if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
 		bitmap_set(psset->bitmap, &psset_bitmap_info, pind);
 	}
@@ -207,7 +205,7 @@ psset_recycle_extract(psset_t *psset, size_t size) {
  * edata with a range in the pageslab, and puts ps back in the set.
  */
 static void
-psset_ps_alloc_insert(psset_t *psset, edata_t *ps, edata_t *r_edata,
+psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
     size_t size) {
 	size_t start = 0;
 	/*
@@ -217,15 +215,14 @@ psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
 	size_t begin = 0;
 	size_t len = 0;

-	fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
+	fb_group_t *ps_fb = ps->active_pages;

 	size_t npages = size >> LG_PAGE;
-	size_t ps_npages = edata_size_get(ps) >> LG_PAGE;

 	size_t largest_unchosen_range = 0;
 	while (true) {
-		bool found = fb_urange_iter(ps_fb, ps_npages, start, &begin,
-		    &len);
+		bool found = fb_urange_iter(ps_fb, HUGEPAGE_PAGES, start,
+		    &begin, &len);
 		/*
 		 * A precondition to this function is that ps must be able to
 		 * serve the allocation.
@@ -245,14 +242,14 @@ psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
 		}
 		start = begin + len;
 	}
-	uintptr_t addr = (uintptr_t)edata_base_get(ps) + begin * PAGE;
+	uintptr_t addr = (uintptr_t)hpdata_addr_get(ps) + begin * PAGE;
 	edata_init(r_edata, edata_arena_ind_get(r_edata), (void *)addr, size,
 	    /* slab */ false, SC_NSIZES, /* sn */ 0, extent_state_active,
 	    /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
 	    EXTENT_NOT_HEAD);
 	edata_ps_set(r_edata, ps);
-	fb_set_range(ps_fb, ps_npages, begin, npages);
-	edata_nfree_set(ps, (uint32_t)(edata_nfree_get(ps) - npages));
+	fb_set_range(ps_fb, HUGEPAGE_PAGES, begin, npages);
+	hpdata_nfree_set(ps, (uint32_t)(hpdata_nfree_get(ps) - npages));
 	/* The pageslab isn't in a bin, so no bin stats need to change. */

 	/*
@@ -267,8 +264,8 @@ psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
 	 * this check in the case where we're allocating from some smaller run.
 	 */
 	start = begin + npages;
-	while (start < ps_npages) {
-		bool found = fb_urange_iter(ps_fb, ps_npages, start, &begin,
+	while (start < HUGEPAGE_PAGES) {
+		bool found = fb_urange_iter(ps_fb, HUGEPAGE_PAGES, start, &begin,
 		    &len);
 		if (!found) {
 			break;
@@ -278,7 +275,7 @@ psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
 		}
 		start = begin + len;
 	}
-	edata_longest_free_range_set(ps, (uint32_t)largest_unchosen_range);
+	hpdata_longest_free_range_set(ps, (uint32_t)largest_unchosen_range);
 	if (largest_unchosen_range == 0) {
 		psset_bin_stats_insert(&psset->stats.full_slabs, ps);
 	} else {
@@ -288,7 +285,7 @@ psset_ps_alloc_insert(psset_t *psset, hpdata_t *ps, edata_t *r_edata,
 bool
 psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
-	edata_t *ps = psset_recycle_extract(psset, size);
+	hpdata_t *ps = psset_recycle_extract(psset, size);
 	if (ps == NULL) {
 		return true;
 	}
@@ -297,48 +294,43 @@ psset_alloc_reuse(psset_t *psset, edata_t *r_edata, size_t size) {
 }

 void
-psset_alloc_new(psset_t *psset, edata_t *ps, edata_t *r_edata, size_t size) {
-	fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
-	size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
-	assert(fb_empty(ps_fb, ps_npages));
-	assert(ps_npages >= (size >> LG_PAGE));
-	edata_nfree_set(ps, (uint32_t)ps_npages);
-	edata_age_set(ps, psset->age_counter);
-	psset->age_counter++;
+psset_alloc_new(psset_t *psset, hpdata_t *ps, edata_t *r_edata, size_t size) {
+	fb_group_t *ps_fb = ps->active_pages;
+	assert(fb_empty(ps_fb, HUGEPAGE_PAGES));
+	assert(hpdata_nfree_get(ps) == HUGEPAGE_PAGES);
 	psset_ps_alloc_insert(psset, ps, r_edata, size);
 }

-edata_t *
+hpdata_t *
 psset_dalloc(psset_t *psset, edata_t *edata) {
 	assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
 	assert(edata_ps_get(edata) != NULL);
-	edata_t *ps = edata_ps_get(edata);
+	hpdata_t *ps = edata_ps_get(edata);

-	fb_group_t *ps_fb = edata_slab_data_get(ps)->bitmap;
-	size_t ps_old_longest_free_range = edata_longest_free_range_get(ps);
+	fb_group_t *ps_fb = ps->active_pages;
+	size_t ps_old_longest_free_range = hpdata_longest_free_range_get(ps);
 	pszind_t old_pind = SC_NPSIZES;
 	if (ps_old_longest_free_range != 0) {
 		old_pind = sz_psz2ind(sz_psz_quantize_floor(
 		    ps_old_longest_free_range << LG_PAGE));
 	}

-	size_t ps_npages = edata_size_get(ps) >> LG_PAGE;
 	size_t begin =
-	    ((uintptr_t)edata_base_get(edata) - (uintptr_t)edata_base_get(ps))
+	    ((uintptr_t)edata_base_get(edata) - (uintptr_t)hpdata_addr_get(ps))
 	    >> LG_PAGE;
 	size_t len = edata_size_get(edata) >> LG_PAGE;
-	fb_unset_range(ps_fb, ps_npages, begin, len);
+	fb_unset_range(ps_fb, HUGEPAGE_PAGES, begin, len);

 	/* The pageslab is still in the bin; adjust its stats first. */
 	psset_bin_stats_t *bin_stats = (ps_old_longest_free_range == 0
 	    ? &psset->stats.full_slabs : &psset->stats.nonfull_slabs[old_pind]);
-	psset_bin_stats_deactivate(bin_stats, edata_hugeified_get(ps), len);
+	psset_bin_stats_deactivate(bin_stats, hpdata_huge_get(ps), len);

-	edata_nfree_set(ps, (uint32_t)(edata_nfree_get(ps) + len));
+	hpdata_nfree_set(ps, (uint32_t)(hpdata_nfree_get(ps) + len));

 	/* We might have just created a new, larger range. */
-	size_t new_begin = (size_t)(fb_fls(ps_fb, ps_npages, begin) + 1);
-	size_t new_end = fb_ffs(ps_fb, ps_npages, begin + len - 1);
+	size_t new_begin = (size_t)(fb_fls(ps_fb, HUGEPAGE_PAGES, begin) + 1);
+	size_t new_end = fb_ffs(ps_fb, HUGEPAGE_PAGES, begin + len - 1);
 	size_t new_range_len = new_end - new_begin;
 	/*
 	 * If the new free range is no longer than the previous longest one,
@@ -352,7 +344,7 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
 	 * Otherwise, it might need to get evicted from the set, or change its
 	 * bin.
 	 */
-	edata_longest_free_range_set(ps, (uint32_t)new_range_len);
+	hpdata_longest_free_range_set(ps, (uint32_t)new_range_len);
 	/*
 	 * If it was previously non-full, then it's in some (possibly now
 	 * incorrect) bin already; remove it.
@@ -366,8 +358,8 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
 	 * and the issue becomes moot).
 	 */
 	if (ps_old_longest_free_range > 0) {
-		psset_edata_heap_remove(psset, old_pind, ps);
-		if (edata_age_heap_empty(&psset->pageslabs[old_pind])) {
+		psset_hpdata_heap_remove(psset, old_pind, ps);
+		if (hpdata_age_heap_empty(&psset->pageslabs[old_pind])) {
 			bitmap_set(psset->bitmap, &psset_bitmap_info,
 			    (size_t)old_pind);
 		}
@@ -379,16 +371,16 @@ psset_dalloc(psset_t *psset, edata_t *edata) {
 		psset_bin_stats_remove(&psset->stats.full_slabs, ps);
 	}
 	/* If the pageslab is empty, it gets evicted from the set. */
-	if (new_range_len == ps_npages) {
+	if (new_range_len == HUGEPAGE_PAGES) {
 		return ps;
 	}
 	/* Otherwise, it gets reinserted. */
 	pszind_t new_pind = sz_psz2ind(sz_psz_quantize_floor(
 	    new_range_len << LG_PAGE));
-	if (edata_age_heap_empty(&psset->pageslabs[new_pind])) {
+	if (hpdata_age_heap_empty(&psset->pageslabs[new_pind])) {
 		bitmap_unset(psset->bitmap, &psset_bitmap_info,
 		    (size_t)new_pind);
 	}
-	psset_edata_heap_insert(psset, new_pind, ps);
+	psset_hpdata_heap_insert(psset, new_pind, ps);

 	return NULL;
 }

test/unit/hpa.c

@@ -38,7 +38,8 @@ create_test_data() {
 	assert_false(err, "");

 	err = hpa_shard_init(&test_data->shard, &test_data->emap,
-	    &test_data->shard_edata_cache, SHARD_IND, ALLOC_MAX);
+	    test_data->base, &test_data->shard_edata_cache, SHARD_IND,
+	    ALLOC_MAX);
 	assert_false(err, "");

 	return (hpa_shard_t *)test_data;

test/unit/psset.c

@ -2,10 +2,8 @@
#include "jemalloc/internal/psset.h" #include "jemalloc/internal/psset.h"
#define PAGESLAB_PAGES (HUGEPAGE / PAGE) #define PAGESLAB_ADDR ((void *)(1234 * HUGEPAGE))
#define PAGESLAB_SIZE (PAGESLAB_PAGES << LG_PAGE) #define PAGESLAB_AGE 5678
#define PAGESLAB_SN 123
#define PAGESLAB_ADDR ((void *)(1234 << LG_PAGE))
#define ALLOC_ARENA_IND 111 #define ALLOC_ARENA_IND 111
#define ALLOC_ESN 222 #define ALLOC_ESN 222
@ -42,14 +40,10 @@ edata_expect(edata_t *edata, size_t page_offset, size_t page_cnt) {
TEST_BEGIN(test_empty) { TEST_BEGIN(test_empty) {
bool err; bool err;
edata_t pageslab; hpdata_t pageslab;
memset(&pageslab, 0, sizeof(pageslab)); hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
edata_t alloc;
edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE, edata_t alloc;
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
/* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD);
edata_init_test(&alloc); edata_init_test(&alloc);
psset_t psset; psset_t psset;
@ -63,27 +57,24 @@ TEST_END
TEST_BEGIN(test_fill) { TEST_BEGIN(test_fill) {
bool err; bool err;
edata_t pageslab;
memset(&pageslab, 0, sizeof(pageslab));
edata_t alloc[PAGESLAB_PAGES];
edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE, hpdata_t pageslab;
/* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active, hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
/* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
EXTENT_IS_HEAD); edata_t alloc[HUGEPAGE_PAGES];
psset_t psset; psset_t psset;
psset_init(&psset); psset_init(&psset);
edata_init_test(&alloc[0]); edata_init_test(&alloc[0]);
psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE); psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
for (size_t i = 1; i < PAGESLAB_PAGES; i++) { for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
edata_init_test(&alloc[i]); edata_init_test(&alloc[i]);
err = psset_alloc_reuse(&psset, &alloc[i], PAGE); err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
expect_false(err, "Nonempty psset failed page allocation."); expect_false(err, "Nonempty psset failed page allocation.");
} }
for (size_t i = 0; i < PAGESLAB_PAGES; i++) { for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
edata_t *edata = &alloc[i]; edata_t *edata = &alloc[i];
edata_expect(edata, i, 1); edata_expect(edata, i, 1);
} }
@@ -98,30 +89,26 @@ TEST_END
 TEST_BEGIN(test_reuse) {
 	bool err;
-	edata_t *ps;
-	edata_t pageslab;
-	memset(&pageslab, 0, sizeof(pageslab));
-	edata_t alloc[PAGESLAB_PAGES];
-	edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
-	    /* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
-	    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-	    EXTENT_IS_HEAD);
+	hpdata_t *ps;
+	hpdata_t pageslab;
+	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+	edata_t alloc[HUGEPAGE_PAGES];
 	psset_t psset;
 	psset_init(&psset);
 	edata_init_test(&alloc[0]);
 	psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
-	for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
 		edata_init_test(&alloc[i]);
 		err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
 		expect_false(err, "Nonempty psset failed page allocation.");
 	}
 	/* Free odd indices. */
-	for (size_t i = 0; i < PAGESLAB_PAGES; i ++) {
+	for (size_t i = 0; i < HUGEPAGE_PAGES; i ++) {
 		if (i % 2 == 0) {
 			continue;
 		}
@@ -129,7 +116,7 @@ TEST_BEGIN(test_reuse) {
 		expect_ptr_null(ps, "Nonempty pageslab evicted");
 	}
 	/* Realloc into them. */
-	for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
 		if (i % 2 == 0) {
 			continue;
 		}
@@ -138,7 +125,7 @@ TEST_BEGIN(test_reuse) {
 		edata_expect(&alloc[i], i, 1);
 	}
 	/* Now, free the pages at indices 0 or 1 mod 2. */
-	for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
 		if (i % 4 > 1) {
 			continue;
 		}
@@ -146,7 +133,7 @@ TEST_BEGIN(test_reuse) {
 		expect_ptr_null(ps, "Nonempty pageslab evicted");
 	}
 	/* And realloc 2-page allocations into them. */
-	for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
 		if (i % 4 != 0) {
 			continue;
 		}
@@ -155,7 +142,7 @@ TEST_BEGIN(test_reuse) {
 		edata_expect(&alloc[i], i, 2);
 	}
 	/* Free all the 2-page allocations. */
-	for (size_t i = 0; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 0; i < HUGEPAGE_PAGES; i++) {
 		if (i % 4 != 0) {
 			continue;
 		}
@@ -175,13 +162,13 @@ TEST_BEGIN(test_reuse) {
 	edata_expect(&alloc[index_of_3], index_of_3, 3);
 	/* Free up a 4-page hole at the end. */
-	ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 1]);
+	ps = psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
 	expect_ptr_null(ps, "Nonempty pageslab evicted");
-	ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 2]);
+	ps = psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 2]);
 	expect_ptr_null(ps, "Nonempty pageslab evicted");
 	/* Make sure we can satisfy an allocation at the very end of a slab. */
-	size_t index_of_4 = PAGESLAB_PAGES - 4;
+	size_t index_of_4 = HUGEPAGE_PAGES - 4;
 	ps = psset_dalloc(&psset, &alloc[index_of_4]);
 	expect_ptr_null(ps, "Nonempty pageslab evicted");
 	err = psset_alloc_reuse(&psset, &alloc[index_of_4], 4 * PAGE);
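(The free/realloc choreography in test_reuse is easier to follow as a picture; for the first eight pages of the slab, with x = allocated and . = free:)

	/*
	 * full slab:                     xxxxxxxx
	 * free odd indices:              x.x.x.x.  (1-page holes)
	 * realloc 1-page into them:      xxxxxxxx
	 * free i with i % 4 <= 1:        ..xx..xx  (2-page holes)
	 * realloc 2-page at i % 4 == 0:  xxxxxxxx
	 */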
@@ -192,33 +179,31 @@ TEST_END
 TEST_BEGIN(test_evict) {
 	bool err;
-	edata_t *ps;
-	edata_t pageslab;
-	memset(&pageslab, 0, sizeof(pageslab));
-	edata_t alloc[PAGESLAB_PAGES];
-	edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
-	    /* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
-	    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-	    EXTENT_IS_HEAD);
+	hpdata_t *ps;
+
+	hpdata_t pageslab;
+	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+	edata_t alloc[HUGEPAGE_PAGES];
 	psset_t psset;
 	psset_init(&psset);
 	/* Alloc the whole slab. */
 	edata_init_test(&alloc[0]);
 	psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
-	for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
 		edata_init_test(&alloc[i]);
 		err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
 		expect_false(err, "Unxpected allocation failure");
 	}
 	/* Dealloc the whole slab, going forwards. */
-	for (size_t i = 0; i < PAGESLAB_PAGES - 1; i++) {
+	for (size_t i = 0; i < HUGEPAGE_PAGES - 1; i++) {
 		ps = psset_dalloc(&psset, &alloc[i]);
 		expect_ptr_null(ps, "Nonempty pageslab evicted");
 	}
-	ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 1]);
+	ps = psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
 	expect_ptr_eq(&pageslab, ps, "Empty pageslab not evicted.");
 	err = psset_alloc_reuse(&psset, &alloc[0], PAGE);
@@ -228,20 +213,15 @@ TEST_END
 TEST_BEGIN(test_multi_pageslab) {
 	bool err;
-	edata_t *ps;
-	edata_t pageslab[2];
-	memset(&pageslab, 0, sizeof(pageslab));
-	edata_t alloc[2][PAGESLAB_PAGES];
-	edata_init(&pageslab[0], /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
-	    /* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
-	    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-	    EXTENT_IS_HEAD);
-	edata_init(&pageslab[1], /* arena_ind */ 0,
-	    (void *)((uintptr_t)PAGESLAB_ADDR + PAGESLAB_SIZE), PAGESLAB_SIZE,
-	    /* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
-	    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-	    EXTENT_IS_HEAD);
+	hpdata_t *ps;
+
+	hpdata_t pageslab[2];
+	hpdata_init(&pageslab[0], PAGESLAB_ADDR, PAGESLAB_AGE);
+	hpdata_init(&pageslab[1],
+	    (void *)((uintptr_t)PAGESLAB_ADDR + HUGEPAGE),
+	    PAGESLAB_AGE + 1);
+
+	edata_t alloc[2][HUGEPAGE_PAGES];
 	psset_t psset;
 	psset_init(&psset);
@@ -254,7 +234,7 @@ TEST_BEGIN(test_multi_pageslab) {
 	/* Fill them both up; make sure we do so in first-fit order. */
 	for (size_t i = 0; i < 2; i++) {
-		for (size_t j = 1; j < PAGESLAB_PAGES; j++) {
+		for (size_t j = 1; j < HUGEPAGE_PAGES; j++) {
 			edata_init_test(&alloc[i][j]);
 			err = psset_alloc_reuse(&psset, &alloc[i][j], PAGE);
 			expect_false(err,
@@ -306,10 +286,10 @@ stats_expect_empty(psset_bin_stats_t *stats) {
 static void
 stats_expect(psset_t *psset, size_t nactive) {
-	if (nactive == PAGESLAB_PAGES) {
+	if (nactive == HUGEPAGE_PAGES) {
 		expect_zu_eq(1, psset->stats.full_slabs.npageslabs_nonhuge,
 		    "Expected a full slab");
-		expect_zu_eq(PAGESLAB_PAGES,
+		expect_zu_eq(HUGEPAGE_PAGES,
 		    psset->stats.full_slabs.nactive_nonhuge,
 		    "Should have exactly filled the bin");
 		expect_zu_eq(0, psset->stats.full_slabs.ninactive_nonhuge,
@@ -317,9 +297,9 @@ stats_expect(psset_t *psset, size_t nactive) {
 	} else {
 		stats_expect_empty(&psset->stats.full_slabs);
 	}
-	size_t ninactive = PAGESLAB_PAGES - nactive;
+	size_t ninactive = HUGEPAGE_PAGES - nactive;
 	pszind_t nonempty_pind = PSSET_NPSIZES;
-	if (ninactive != 0 && ninactive < PAGESLAB_PAGES) {
+	if (ninactive != 0 && ninactive < HUGEPAGE_PAGES) {
 		nonempty_pind = sz_psz2ind(sz_psz_quantize_floor(
 		    ninactive << LG_PAGE));
 	}
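(stats_expect mirrors the psset's own binning rule: a partially full slab is tracked in the size-class bin for its free run, quantized downward. Concretely, a sketch assuming 4 KiB pages, i.e. LG_PAGE == 12:)

	/*
	 * ninactive == 3 free pages  ->  3 << LG_PAGE == 12 KiB;
	 * sz_psz_quantize_floor(12 KiB) rounds down to the nearest page-size
	 * class, and sz_psz2ind() turns that into the bin index. Completely
	 * full and completely empty slabs are deliberately excluded above.
	 */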
@@ -342,14 +322,11 @@ stats_expect(psset_t *psset, size_t nactive) {
 TEST_BEGIN(test_stats) {
 	bool err;
-	edata_t pageslab;
-	memset(&pageslab, 0, sizeof(pageslab));
-	edata_t alloc[PAGESLAB_PAGES];
-	edata_init(&pageslab, /* arena_ind */ 0, PAGESLAB_ADDR, PAGESLAB_SIZE,
-	    /* slab */ true, SC_NSIZES, PAGESLAB_SN, extent_state_active,
-	    /* zeroed */ false, /* comitted */ true, EXTENT_PAI_HPA,
-	    EXTENT_IS_HEAD);
+	hpdata_t pageslab;
+	hpdata_init(&pageslab, PAGESLAB_ADDR, PAGESLAB_AGE);
+
+	edata_t alloc[HUGEPAGE_PAGES];
 	psset_t psset;
 	psset_init(&psset);
@@ -357,15 +334,15 @@ TEST_BEGIN(test_stats) {
 	edata_init_test(&alloc[0]);
 	psset_alloc_new(&psset, &pageslab, &alloc[0], PAGE);
-	for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
 		stats_expect(&psset, i);
 		edata_init_test(&alloc[i]);
 		err = psset_alloc_reuse(&psset, &alloc[i], PAGE);
 		expect_false(err, "Nonempty psset failed page allocation.");
 	}
-	stats_expect(&psset, PAGESLAB_PAGES);
-	edata_t *ps;
-	for (ssize_t i = PAGESLAB_PAGES - 1; i >= 0; i--) {
+	stats_expect(&psset, HUGEPAGE_PAGES);
+	hpdata_t *ps;
+	for (ssize_t i = HUGEPAGE_PAGES - 1; i >= 0; i--) {
 		ps = psset_dalloc(&psset, &alloc[i]);
 		expect_true((ps == NULL) == (i != 0),
 		    "psset_dalloc should only evict a slab on the last free");
@@ -384,37 +361,28 @@ TEST_END
 /*
  * Fills in and inserts two pageslabs, with the first better than the second,
  * and each fully allocated (into the allocations in allocs and worse_allocs,
- * each of which should be PAGESLAB_PAGES long).
+ * each of which should be HUGEPAGE_PAGES long).
  *
  * (There's nothing magic about these numbers; it's just useful to share the
  * setup between the oldest fit and the insert/remove test).
  */
 static void
-init_test_pageslabs(psset_t *psset, edata_t *pageslab, edata_t *worse_pageslab,
-    edata_t *alloc, edata_t *worse_alloc) {
+init_test_pageslabs(psset_t *psset, hpdata_t *pageslab,
+    hpdata_t *worse_pageslab, edata_t *alloc, edata_t *worse_alloc) {
 	bool err;
-	memset(pageslab, 0, sizeof(*pageslab));
-	edata_init(pageslab, /* arena_ind */ 0, (void *)(10 * PAGESLAB_SIZE),
-	    PAGESLAB_SIZE, /* slab */ true, SC_NSIZES, PAGESLAB_SN + 1,
-	    extent_state_active, /* zeroed */ false, /* comitted */ true,
-	    EXTENT_PAI_HPA, EXTENT_IS_HEAD);
+
+	hpdata_init(pageslab, (void *)(10 * HUGEPAGE), PAGESLAB_AGE);
 	/*
-	 * This pageslab is better from an edata_comp_snad POV, but will be
-	 * added to the set after the previous one, and so should be less
-	 * preferred for allocations.
+	 * This pageslab would be better from an address-first-fit POV, but
+	 * worse from an age POV.
 	 */
-	memset(worse_pageslab, 0, sizeof(*worse_pageslab));
-	edata_init(worse_pageslab, /* arena_ind */ 0,
-	    (void *)(9 * PAGESLAB_SIZE), PAGESLAB_SIZE, /* slab */ true,
-	    SC_NSIZES, PAGESLAB_SN - 1, extent_state_active, /* zeroed */ false,
-	    /* comitted */ true, EXTENT_PAI_HPA, EXTENT_IS_HEAD);
+	hpdata_init(worse_pageslab, (void *)(9 * HUGEPAGE), PAGESLAB_AGE + 1);
+
 	psset_init(psset);
 	edata_init_test(&alloc[0]);
 	psset_alloc_new(psset, pageslab, &alloc[0], PAGE);
-	for (size_t i = 1; i < PAGESLAB_PAGES; i++) {
+	for (size_t i = 1; i < HUGEPAGE_PAGES; i++) {
 		edata_init_test(&alloc[i]);
 		err = psset_alloc_reuse(psset, &alloc[i], PAGE);
 		expect_false(err, "Nonempty psset failed page allocation.");
@@ -430,7 +398,7 @@ init_test_pageslabs(psset_t *psset, edata_t *pageslab, edata_t *worse_pageslab,
 	 * Make the two pssets otherwise indistinguishable; all full except for
 	 * a single page.
 	 */
-	for (size_t i = 1; i < PAGESLAB_PAGES - 1; i++) {
+	for (size_t i = 1; i < HUGEPAGE_PAGES - 1; i++) {
 		edata_init_test(&worse_alloc[i]);
 		err = psset_alloc_reuse(psset, &alloc[i], PAGE);
 		expect_false(err, "Nonempty psset failed page allocation.");
@@ -439,17 +407,17 @@ init_test_pageslabs(psset_t *psset, edata_t *pageslab, edata_t *worse_pageslab,
 	}
 	/* Deallocate the last page from the older pageslab. */
-	edata_t *evicted = psset_dalloc(psset, &alloc[PAGESLAB_PAGES - 1]);
+	hpdata_t *evicted = psset_dalloc(psset, &alloc[HUGEPAGE_PAGES - 1]);
 	expect_ptr_null(evicted, "Unexpected eviction");
 }
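(After this helper runs, each slab has exactly one free page, so the next allocation isolates the psset's ordering policy. The state test_oldest_fit depends on, as a sketch:)

	/*
	 * pageslab:        addr == 10 * HUGEPAGE, age == PAGESLAB_AGE      (older)
	 * worse_pageslab:  addr ==  9 * HUGEPAGE, age == PAGESLAB_AGE + 1
	 *
	 * Pure address-ordered first fit would pick worse_pageslab (lower
	 * address); the psset instead prefers the lower age, so pageslab
	 * must satisfy the next allocation.
	 */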
 TEST_BEGIN(test_oldest_fit) {
 	bool err;
-	edata_t alloc[PAGESLAB_PAGES];
-	edata_t worse_alloc[PAGESLAB_PAGES];
-	edata_t pageslab;
-	edata_t worse_pageslab;
+	edata_t alloc[HUGEPAGE_PAGES];
+	edata_t worse_alloc[HUGEPAGE_PAGES];
+	hpdata_t pageslab;
+	hpdata_t worse_pageslab;
 	psset_t psset;
@@ -468,12 +436,12 @@ TEST_END
 TEST_BEGIN(test_insert_remove) {
 	bool err;
-	edata_t *ps;
-	edata_t alloc[PAGESLAB_PAGES];
-	edata_t worse_alloc[PAGESLAB_PAGES];
-	edata_t pageslab;
-	edata_t worse_pageslab;
+	hpdata_t *ps;
+	edata_t alloc[HUGEPAGE_PAGES];
+	edata_t worse_alloc[HUGEPAGE_PAGES];
+	hpdata_t pageslab;
+	hpdata_t worse_pageslab;
 	psset_t psset;
@@ -482,31 +450,31 @@ TEST_BEGIN(test_insert_remove) {
 	/* Remove better; should still be able to alloc from worse. */
 	psset_remove(&psset, &pageslab);
-	err = psset_alloc_reuse(&psset, &worse_alloc[PAGESLAB_PAGES - 1], PAGE);
+	err = psset_alloc_reuse(&psset, &worse_alloc[HUGEPAGE_PAGES - 1], PAGE);
 	expect_false(err, "Removal should still leave an empty page");
 	expect_ptr_eq(&worse_pageslab,
-	    edata_ps_get(&worse_alloc[PAGESLAB_PAGES - 1]),
+	    edata_ps_get(&worse_alloc[HUGEPAGE_PAGES - 1]),
 	    "Allocated out of wrong ps");
 	/*
 	 * After deallocating the previous alloc and reinserting better, it
 	 * should be preferred for future allocations.
 	 */
-	ps = psset_dalloc(&psset, &worse_alloc[PAGESLAB_PAGES - 1]);
+	ps = psset_dalloc(&psset, &worse_alloc[HUGEPAGE_PAGES - 1]);
 	expect_ptr_null(ps, "Incorrect eviction of nonempty pageslab");
 	psset_insert(&psset, &pageslab);
-	err = psset_alloc_reuse(&psset, &alloc[PAGESLAB_PAGES - 1], PAGE);
+	err = psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
 	expect_false(err, "psset should be nonempty");
-	expect_ptr_eq(&pageslab, edata_ps_get(&alloc[PAGESLAB_PAGES - 1]),
+	expect_ptr_eq(&pageslab, edata_ps_get(&alloc[HUGEPAGE_PAGES - 1]),
 	    "Removal/reinsertion shouldn't change ordering");
 	/*
 	 * After deallocating and removing both, allocations should fail.
 	 */
-	ps = psset_dalloc(&psset, &alloc[PAGESLAB_PAGES - 1]);
+	ps = psset_dalloc(&psset, &alloc[HUGEPAGE_PAGES - 1]);
 	expect_ptr_null(ps, "Incorrect eviction");
 	psset_remove(&psset, &pageslab);
 	psset_remove(&psset, &worse_pageslab);
-	err = psset_alloc_reuse(&psset, &alloc[PAGESLAB_PAGES - 1], PAGE);
+	err = psset_alloc_reuse(&psset, &alloc[HUGEPAGE_PAGES - 1], PAGE);
 	expect_true(err, "psset should be empty, but an alloc succeeded");
 }
 TEST_END