Emap: Move in slab interior registration.

David Goldblatt 2020-01-30 13:32:38 -08:00, committed by David Goldblatt
parent d05b61db4a
commit 9b5ca0b09d
3 changed files with 41 additions and 16 deletions

@@ -43,4 +43,26 @@ void emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap,
 bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
     edata_t *edata, szind_t szind, bool slab);
+/*
+ * Does the same thing, but with the interior of the range, for slab
+ * allocations.
+ *
+ * You might wonder why we don't just have a single emap_register function that
+ * does both depending on the value of 'slab'. The answer is twofold:
+ * - As a practical matter, in places like the extract->split->commit pathway,
+ *   we defer the interior operation until we're sure that the commit won't
+ *   fail (but we have to register the split boundaries there).
+ * - In general, we're trying to move to a world where the page-specific
+ *   allocator doesn't know as much about how the pages it allocates will be
+ *   used, and passing a 'slab' parameter everywhere makes that more
+ *   complicated.
+ *
+ * Unlike the boundary version, this function can't fail; this is because slabs
+ * can't get big enough to touch a new page that neither of the boundaries
+ * touched, so no allocation is necessary to fill the interior once the
+ * boundary has been touched.
+ */
+void emap_register_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
+    edata_t *edata, szind_t szind);
 #endif /* JEMALLOC_INTERNAL_EMAP_H */
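
A reading aid, not part of the commit: a minimal sketch of how a caller is meant to pair the two registrations described in the comment above. It assumes the jemalloc-internal types and the two declarations from this header; the wrapper function itself is hypothetical.

/*
 * Hypothetical caller sketch: register the boundaries first (this step can
 * fail, since it may need to allocate rtree state), and register the interior
 * only once failure is no longer possible.
 */
static bool
slab_register_sketch(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
    edata_t *edata, szind_t szind) {
    if (emap_register_boundary(tsdn, emap, rtree_ctx, edata, szind,
        /* slab */ true)) {
        return true;    /* Boundary registration failed. */
    }
    /* Can't fail: the boundaries already touched every page this needs. */
    emap_register_interior(tsdn, emap, rtree_ctx, edata, szind);
    return false;
}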

@@ -148,3 +148,16 @@ emap_register_boundary(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
     emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
     return false;
 }
+
+void
+emap_register_interior(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
+    edata_t *edata, szind_t szind) {
+    assert(edata_slab_get(edata));
+
+    /* Register interior. */
+    for (size_t i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
+        rtree_write(tsdn, &emap->rtree, rtree_ctx,
+            (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
+            LG_PAGE), edata, szind, true);
+    }
+}
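
For orientation, an illustration that is not part of the commit: the loop above touches only the strictly interior pages. For a slab spanning npages pages, page 0 and page npages - 1 are already covered by emap_register_boundary, so the loop writes rtree entries for pages 1 through npages - 2; a one- or two-page slab has no interior and the body never executes. An equivalent spelling of the same bounds, assuming the jemalloc-internal helpers used above:

/*
 * Clarity-only rewrite of the loop bounds; behavior matches the function
 * above (npages is a local name introduced here, not in the commit).
 */
size_t npages = edata_size_get(edata) >> LG_PAGE;
for (size_t i = 1; i + 1 < npages; i++) {
    /* Page i is neither the first nor the last page of the slab. */
    rtree_write(tsdn, &emap->rtree, rtree_ctx,
        (uintptr_t)edata_base_get(edata) + (uintptr_t)(i << LG_PAGE),
        edata, szind, true);
}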

@@ -270,19 +270,6 @@ extent_activate_locked(tsdn_t *tsdn, ecache_t *ecache, edata_t *edata) {
     edata_state_set(edata, extent_state_active);
 }
-
-static void
-extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, edata_t *edata,
-    szind_t szind) {
-    assert(edata_slab_get(edata));
-
-    /* Register interior. */
-    for (size_t i = 1; i < (edata_size_get(edata) >> LG_PAGE) - 1; i++) {
-        rtree_write(tsdn, &emap_global.rtree, rtree_ctx,
-            (uintptr_t)edata_base_get(edata) + (uintptr_t)(i <<
-            LG_PAGE), edata, szind, true);
-    }
-}
 
 static void
 extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
     cassert(config_prof);
@@ -341,7 +328,8 @@ extent_register_impl(tsdn_t *tsdn, edata_t *edata, bool gdump_add) {
     }
     if (slab) {
-        extent_interior_register(tsdn, rtree_ctx, edata, szind);
+        emap_register_interior(tsdn, &emap_global, rtree_ctx, edata,
+            szind);
     }
 
     emap_unlock_edata(tsdn, &emap_global, edata);
@@ -704,7 +692,8 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks, ecache_t *ecache,
     assert(edata_state_get(edata) == extent_state_active);
     if (slab) {
         edata_slab_set(edata, slab);
-        extent_interior_register(tsdn, rtree_ctx, edata, szind);
+        emap_register_interior(tsdn, &emap_global, rtree_ctx, edata,
+            szind);
     }
 
     if (*zero) {
@@ -867,7 +856,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
             &rtree_ctx_fallback);
         edata_slab_set(edata, true);
-        extent_interior_register(tsdn, rtree_ctx, edata, szind);
+        emap_register_interior(tsdn, &emap_global, rtree_ctx, edata,
+            szind);
     }
 
     if (*zero && !edata_zeroed_get(edata)) {
         void *addr = edata_base_get(edata);