Emap: Move in szind and slab modifications.

parent 9b5d105fc3
commit 65a54d7714
@@ -79,7 +79,13 @@ struct emap_prepare_s {
  * is going to be used as a slab, you still need to call emap_register_interior
  * on it, though.
  *
- * Each operation has a "prepare" and a "commit" portion. The prepare portion
+ * Remap simply changes the szind and slab status of an extent's boundary
+ * mappings. If the extent is not a slab, it doesn't bother with updating the
+ * end mapping (since lookups only occur in the interior of an extent for
+ * slabs). Since the szind and slab status only make sense for active extents,
+ * this should only be called while activating or deactivating an extent.
+ *
+ * Split and merge have a "prepare" and a "commit" portion. The prepare portion
  * does the operations that can be done without exclusive access to the extent
  * in question, while the commit variant requires exclusive access to maintain
  * the emap invariants. The only function that can fail is emap_split_prepare,
@@ -90,8 +96,8 @@ struct emap_prepare_s {
  * and esn values) data for the split variants, and can be reused for any
  * purpose by its given arena after a merge or a failed split.
  */
-void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, size_t size,
-    szind_t szind, bool slab);
+void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
+    bool slab);
 bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
     edata_t *edata, size_t size_a, szind_t szind_a, bool slab_a, edata_t *trail,
     size_t size_b, szind_t szind_b, bool slab_b);
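For illustration, a hedged sketch of a caller under the new declaration; hypothetical_activate_slab is an invented name, and the emap_register_interior signature is assumed from its mention in the comment above, not taken from this commit:

/*
 * Hypothetical helper (illustration only): activate an extent as a slab.
 * emap_remap stamps the boundary mappings; per the comment above, the
 * interior pages still need a separate emap_register_interior call.
 */
static void
hypothetical_activate_slab(tsdn_t *tsdn, edata_t *edata, szind_t szind) {
    emap_remap(tsdn, &emap_global, edata, szind, /* slab */ true);
    emap_register_interior(tsdn, &emap_global, edata, szind);
}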
src/arena.c (16 lines changed)
@@ -1599,16 +1599,10 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
         safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
     }
 
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
-    edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
-        (uintptr_t)ptr, true);
-
+    edata_t *edata = emap_lookup(tsdn, &emap_global, ptr);
+
     szind_t szind = sz_size2index(usize);
-    edata_szind_set(edata, szind);
-    rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
-        (uintptr_t)ptr, szind, false);
+    emap_remap(tsdn, &emap_global, edata, szind, false);
 
     prof_idump_rollback(tsdn, usize);
 
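The open-coded rtree read becomes an emap_lookup call. That wrapper's body isn't part of this diff; a minimal sketch of what it plausibly does, reusing only names visible elsewhere in this commit (an approximation, not the verbatim source):

/* Sketch of the lookup wrapper this hunk switches to. */
static inline edata_t *
emap_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
    EMAP_DECLARE_RTREE_CTX;

    /* dependent=true: ptr is a live allocation, so its entry must exist. */
    return rtree_edata_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr,
        true);
}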
@@ -1620,11 +1614,7 @@ arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
     cassert(config_prof);
     assert(ptr != NULL);
 
-    edata_szind_set(edata, SC_NBINS);
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-    rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
-        (uintptr_t)ptr, SC_NBINS, false);
+    emap_remap(tsdn, &emap_global, edata, SC_NBINS, false);
 
     assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
 
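Note the symmetry: arena_prof_promote stamps the sampled allocation's true size class, while arena_prof_demote restores SC_NBINS, the index of the first large size class (hence the isalloc == SC_LARGE_MINCLASS assertion). Both paths pass slab = false, since promoted objects are never slab-backed. In sketch form (illustration only, not code from this commit):

emap_remap(tsdn, &emap_global, edata, sz_size2index(usize), false); /* promote */
emap_remap(tsdn, &emap_global, edata, SC_NBINS, false);             /* demote */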
src/emap.c (16 lines changed)
@@ -199,14 +199,26 @@ emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
     }
 }
 
-void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, size_t size,
-    szind_t szind, bool slab) {
+void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
+    bool slab) {
     EMAP_DECLARE_RTREE_CTX;
 
     edata_szind_set(edata, szind);
     if (szind != SC_NSIZES) {
         rtree_szind_slab_update(tsdn, &emap->rtree, rtree_ctx,
             (uintptr_t)edata_addr_get(edata), szind, slab);
+        /*
+         * Recall that this is called only for active->inactive and
+         * inactive->active transitions (since only active extents have
+         * meaningful values for szind and slab). Active, non-slab
+         * extents only need to handle lookups at their head (on
+         * deallocation), so we don't bother filling in the end
+         * boundary.
+         *
+         * For slab extents, we do the end-mapping change. This still
+         * leaves the interior unmodified; an emap_register_interior
+         * call is coming in those cases, though.
+         */
         if (slab && edata_size_get(edata) > PAGE) {
             rtree_szind_slab_update(tsdn,
                 &emap->rtree, rtree_ctx,
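The hunk's visible context ends mid-call. Judging from the head-boundary update just above, the truncated tail plausibly finishes the end-boundary update as follows; edata_past_get and the exact argument layout are an assumption here, not shown by the diff:

/*
 * Reconstruction (assumed, not verbatim): for multi-page slabs, also
 * update the mapping of the last page, so boundary lookups at the end
 * of the slab see the new szind/slab state.
 */
if (slab && edata_size_get(edata) > PAGE) {
    rtree_szind_slab_update(tsdn, &emap->rtree, rtree_ctx,
        (uintptr_t)edata_past_get(edata) - (uintptr_t)PAGE, szind, slab);
}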
@@ -516,7 +516,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
     }
 
     if (leadsize == 0 && trailsize == 0) {
-        emap_remap(tsdn, &emap_global, *edata, size, szind, slab);
+        emap_remap(tsdn, &emap_global, *edata, szind, slab);
     }
 
     return extent_split_interior_ok;
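The only change here is dropping the size argument, which was redundant: the extent already records its own size, so emap_remap can derive everything it needs from edata. A one-line sketch of the invariant that makes the parameter droppable (size being the local variable in extent_split_interior):

/* Sketch: the dropped parameter was always recoverable from the extent. */
assert(edata_size_get(*edata) == size);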
@@ -3,10 +3,10 @@
 #include "jemalloc/internal/jemalloc_internal_includes.h"
 
 #include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/emap.h"
 #include "jemalloc/internal/extent_mmap.h"
 #include "jemalloc/internal/mutex.h"
 #include "jemalloc/internal/prof_recent.h"
-#include "jemalloc/internal/rtree.h"
 #include "jemalloc/internal/util.h"
 
 /******************************************************************************/
@@ -175,12 +175,9 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
         extent_dalloc_wrapper(tsdn, arena, ehooks, trail);
         return true;
     }
-    rtree_ctx_t rtree_ctx_fallback;
-    rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
     szind_t szind = sz_size2index(usize);
-    edata_szind_set(edata, szind);
-    rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
-        (uintptr_t)edata_addr_get(edata), szind, false);
+    emap_remap(tsdn, &emap_global, edata, szind, false);
 
     if (config_stats && new_mapping) {
         arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
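The rewrite here is the same mechanical transformation as in arena.c; abstracting the pattern across all the call sites touched by this commit (a sketch, with the rtree_ctx setup elided from the "before" side):

/* Before: callers updated the edata and the rtree separately. */
edata_szind_set(edata, szind);
rtree_szind_slab_update(tsdn, &emap_global.rtree, rtree_ctx,
    (uintptr_t)edata_addr_get(edata), szind, false);

/* After: emap_remap owns both pieces of bookkeeping. */
emap_remap(tsdn, &emap_global, edata, szind, false);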