HPA: Pull out a hooks type.

For now, this is a no-op change.  In a subsequent commit, it will be useful for
testing.
David Goldblatt, 2021-06-14 14:18:08 -07:00 (committed by David Goldblatt)
commit 113938b6f4, parent 1d4a7666d5
14 changed files with 100 additions and 29 deletions

@@ -122,6 +122,7 @@ C_SRCS := $(srcroot)src/jemalloc.c \
$(srcroot)src/hook.c \
$(srcroot)src/hpa.c \
$(srcroot)src/hpa_central.c \
$(srcroot)src/hpa_hooks.c \
$(srcroot)src/hpdata.c \
$(srcroot)src/inspect.c \
$(srcroot)src/large.c \

@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_HPA_H
#include "jemalloc/internal/exp_grow.h"
#include "jemalloc/internal/hpa_hooks.h"
#include "jemalloc/internal/hpa_opts.h"
#include "jemalloc/internal/pai.h"
#include "jemalloc/internal/psset.h"
@@ -56,6 +57,14 @@ struct hpa_shard_s {
/* The base metadata allocator. */
base_t *base;
/*
* The HPA hooks for this shard. Eventually, once we have the
* hpa_central_t back, these should live there (since it doesn't make
* sense for different shards on the same hpa_central_t to have
* different hooks).
*/
hpa_hooks_t hooks;
/*
* This edata cache is the one we use when allocating a small extent
* from a pageslab. The pageslab itself comes from the centralized
@@ -109,7 +118,8 @@ struct hpa_shard_s {
*/
bool hpa_supported();
bool hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
edata_cache_t *edata_cache, unsigned ind, const hpa_shard_opts_t *opts);
edata_cache_t *edata_cache, unsigned ind, const hpa_hooks_t *hooks,
const hpa_shard_opts_t *opts);
void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,

@@ -0,0 +1,15 @@
#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
#define JEMALLOC_INTERNAL_HPA_HOOKS_H
typedef struct hpa_hooks_s hpa_hooks_t;
struct hpa_hooks_s {
void *(*map)(size_t size);
void (*unmap)(void *ptr, size_t size);
void (*purge)(void *ptr, size_t size);
void (*hugify)(void *ptr, size_t size);
void (*dehugify)(void *ptr, size_t size);
};
extern hpa_hooks_t hpa_hooks_default;
#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
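
Because hpa_hooks_t is just a table of function pointers, a test can substitute its own implementations and let the rest delegate to the defaults. A minimal sketch of that idea; test_map, test_map_calls, and make_test_hooks are hypothetical names that are not part of this commit:

#include <stddef.h>
#include "jemalloc/internal/hpa_hooks.h"

/* Hypothetical test-side hook: count mappings, then fall through to the
 * default implementation via the default hooks table. */
static size_t test_map_calls = 0;

static void *
test_map(size_t size) {
	test_map_calls++;
	return hpa_hooks_default.map(size);
}

/* Build a hooks table that overrides only the map hook. */
static hpa_hooks_t
make_test_hooks(void) {
	hpa_hooks_t hooks = hpa_hooks_default;
	hooks.map = &test_map;
	return hooks;
}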

@@ -131,7 +131,9 @@ bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
* that we can boot without worrying about the HPA, then turn it on in a0.
*/
bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
const hpa_hooks_t *hpa_hooks, const hpa_shard_opts_t *hpa_opts,
const sec_opts_t *hpa_sec_opts);
/*
* We stop using the HPA when custom extent hooks are installed, but still
* redirect deallocations to it.

@@ -62,6 +62,7 @@
<ClCompile Include="..\..\..\..\src\hook.c" />
<ClCompile Include="..\..\..\..\src\hpa.c" />
<ClCompile Include="..\..\..\..\src\hpa_central.c" />
<ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
<ClCompile Include="..\..\..\..\src\hpdata.c" />
<ClCompile Include="..\..\..\..\src\inspect.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />

@@ -70,6 +70,9 @@
<ClCompile Include="..\..\..\..\src\hpa_central.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpa_hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpdata.c">
<Filter>Source Files</Filter>
</ClCompile>

@@ -62,6 +62,7 @@
<ClCompile Include="..\..\..\..\src\hook.c" />
<ClCompile Include="..\..\..\..\src\hpa.c" />
<ClCompile Include="..\..\..\..\src\hpa_central.c" />
<ClCompile Include="..\..\..\..\src\hpa_hooks.c" />
<ClCompile Include="..\..\..\..\src\hpdata.c" />
<ClCompile Include="..\..\..\..\src\inspect.c" />
<ClCompile Include="..\..\..\..\src\jemalloc.c" />

@@ -70,6 +70,9 @@
<ClCompile Include="..\..\..\..\src\hpa_central.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpa_hooks.c">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="..\..\..\..\src\hpdata.c">
<Filter>Source Files</Filter>
</ClCompile>

@@ -1574,8 +1574,8 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
hpa_shard_opts.deferral_allowed = background_thread_enabled();
if (pa_shard_enable_hpa(tsdn, &arena->pa_shard, &hpa_shard_opts,
&opt_hpa_sec_opts)) {
if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
&hpa_hooks_default, &hpa_shard_opts, &opt_hpa_sec_opts)) {
goto label_error;
}
}

@@ -52,7 +52,8 @@ hpa_supported() {
bool
hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
edata_cache_t *edata_cache, unsigned ind, const hpa_shard_opts_t *opts) {
edata_cache_t *edata_cache, unsigned ind,
const hpa_hooks_t *hooks, const hpa_shard_opts_t *opts) {
/* malloc_conf processing should have filtered out these cases. */
assert(hpa_supported());
bool err;
@@ -69,6 +70,7 @@ hpa_shard_init(hpa_shard_t *shard, emap_t *emap, base_t *base,
assert(edata_cache != NULL);
shard->base = base;
shard->hooks = *hooks;
edata_cache_small_init(&shard->ecs, edata_cache);
psset_init(&shard->psset);
shard->age_counter = 0;
@@ -251,20 +253,14 @@ hpa_grow(tsdn_t *tsdn, hpa_shard_t *shard) {
* allocate an edata_t for the new psset.
*/
if (shard->eden == NULL) {
/*
* During development, we're primarily concerned with systems
* with overcommit. Eventually, we should be more careful here.
*/
bool commit = true;
/* Allocate address space, bailing if we fail. */
void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
&commit);
void *new_eden = shard->hooks.map(HPA_EDEN_SIZE);
if (new_eden == NULL) {
return NULL;
}
ps = hpa_alloc_ps(tsdn, shard);
if (ps == NULL) {
pages_unmap(new_eden, HPA_EDEN_SIZE);
shard->hooks.unmap(new_eden, HPA_EDEN_SIZE);
return NULL;
}
shard->eden = new_eden;
@@ -335,7 +331,7 @@ hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
/* Actually do the purging, now that the lock is dropped. */
if (dehugify) {
pages_nohuge(hpdata_addr_get(to_purge), HUGEPAGE);
shard->hooks.dehugify(hpdata_addr_get(to_purge), HUGEPAGE);
}
size_t total_purged = 0;
uint64_t purges_this_pass = 0;
@@ -346,7 +342,7 @@ hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
total_purged += purge_size;
assert(total_purged <= HUGEPAGE);
purges_this_pass++;
pages_purge_forced(purge_addr, purge_size);
shard->hooks.purge(purge_addr, purge_size);
}
malloc_mutex_lock(tsdn, &shard->mtx);
@@ -404,15 +400,7 @@ hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
malloc_mutex_unlock(tsdn, &shard->mtx);
bool err = pages_huge(hpdata_addr_get(to_hugify),
HUGEPAGE);
/*
* It's not clear what we could do in case of error; we
* might get into situations where we loop trying to
* hugify some page and failing over and over again.
* Just eat the error and pretend we were successful.
*/
(void)err;
shard->hooks.hugify(hpdata_addr_get(to_hugify), HUGEPAGE);
malloc_mutex_lock(tsdn, &shard->mtx);
shard->stats.nhugifies++;
@@ -808,7 +796,7 @@ hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
/* There should be no allocations anywhere. */
assert(hpdata_empty(ps));
psset_remove(&shard->psset, ps);
pages_unmap(hpdata_addr_get(ps), HUGEPAGE);
shard->hooks.unmap(hpdata_addr_get(ps), HUGEPAGE);
}
}

src/hpa_hooks.c (new file)
@@ -0,0 +1,46 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/hpa_hooks.h"
static void *hpa_hooks_map(size_t size);
static void hpa_hooks_unmap(void *ptr, size_t size);
static void hpa_hooks_purge(void *ptr, size_t size);
static void hpa_hooks_hugify(void *ptr, size_t size);
static void hpa_hooks_dehugify(void *ptr, size_t size);
hpa_hooks_t hpa_hooks_default = {
&hpa_hooks_map,
&hpa_hooks_unmap,
&hpa_hooks_purge,
&hpa_hooks_hugify,
&hpa_hooks_dehugify,
};
static void *
hpa_hooks_map(size_t size) {
bool commit = true;
return pages_map(NULL, size, HUGEPAGE, &commit);
}
static void
hpa_hooks_unmap(void *ptr, size_t size) {
pages_unmap(ptr, size);
}
static void
hpa_hooks_purge(void *ptr, size_t size) {
pages_purge_forced(ptr, size);
}
static void
hpa_hooks_hugify(void *ptr, size_t size) {
bool err = pages_huge(ptr, size);
(void)err;
}
static void
hpa_hooks_dehugify(void *ptr, size_t size) {
bool err = pages_nohuge(ptr, size);
(void)err;
}
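
With the call sites routed through shard->hooks, a shard can be initialized against any hooks table rather than calling the page-level functions directly. A hedged usage sketch; test_hooks, test_purge, and the surrounding shard/emap/base/edata_cache/opts variables are assumed to exist and are not part of this commit:

/* Hypothetical: start from the defaults, override one hook, and hand the
 * table to hpa_shard_init(); all other setup is assumed to have happened. */
hpa_hooks_t test_hooks = hpa_hooks_default;
test_hooks.purge = &test_purge;
bool err = hpa_shard_init(&shard, emap, base, &edata_cache, /* ind */ 0,
    &test_hooks, &opts);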

@@ -1800,7 +1800,7 @@ malloc_init_hard_a0_locked() {
hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
hpa_shard_opts.deferral_allowed = background_thread_enabled();
if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
&hpa_shard_opts, &opt_hpa_sec_opts)) {
&hpa_hooks_default, &hpa_shard_opts, &opt_hpa_sec_opts)) {
return true;
}
}

@@ -50,9 +50,10 @@ pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, emap_t *emap, base_t *base,
bool
pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
const hpa_hooks_t *hpa_hooks, const hpa_shard_opts_t *hpa_opts,
const sec_opts_t *hpa_sec_opts) {
if (hpa_shard_init(&shard->hpa_shard, shard->emap, shard->base,
&shard->edata_cache, shard->ind, hpa_opts)) {
&shard->edata_cache, shard->ind, hpa_hooks, hpa_opts)) {
return true;
}
if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,

@@ -42,7 +42,7 @@ create_test_data() {
err = hpa_shard_init(&test_data->shard, &test_data->emap,
test_data->base, &test_data->shard_edata_cache, SHARD_IND,
&opts);
&hpa_hooks_default, &opts);
assert_false(err, "");
return (hpa_shard_t *)test_data;