Add emap, for tracking extent locking.

This commit is contained in:
David Goldblatt
2020-01-27 13:55:46 -08:00
committed by David Goldblatt
parent 0f686e82a3
commit 01f255161c
17 changed files with 257 additions and 197 deletions

View File

@@ -188,7 +188,7 @@ arena_salloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
szind_t szind = rtree_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, true);
assert(szind != SC_NSIZES);
@@ -211,7 +211,7 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
edata_t *edata;
szind_t szind;
if (rtree_edata_szind_read(tsdn, &extents_rtree, rtree_ctx,
if (rtree_edata_szind_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, false, &edata, &szind)) {
return 0;
}
@@ -247,11 +247,11 @@ arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
szind_t szind;
bool slab;
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
true, &szind, &slab);
rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
if (config_debug) {
edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == edata_szind_get(edata));
assert(szind < SC_NSIZES);
@@ -302,13 +302,13 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
assert(szind != SC_NSIZES);
} else {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
}
if (config_debug) {
rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
edata_t *edata = rtree_edata_read(tsdn, &extents_rtree,
edata_t *edata = rtree_edata_read(tsdn, &emap_global.rtree,
rtree_ctx, (uintptr_t)ptr, true);
assert(szind == edata_szind_get(edata));
assert(szind < SC_NSIZES);
@@ -345,7 +345,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
assert(szind == sz_size2index(size));
@@ -353,7 +353,7 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
if (config_debug) {
edata_t *edata = rtree_edata_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
&emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == edata_szind_get(edata));
assert(slab == edata_slab_get(edata));
}
@@ -388,8 +388,8 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
&rtree_ctx_fallback);
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
(uintptr_t)ptr, true, &local_ctx.szind,
rtree_szind_slab_read(tsdn, &emap_global.rtree,
rtree_ctx, (uintptr_t)ptr, true, &local_ctx.szind,
&local_ctx.slab);
assert(local_ctx.szind == sz_size2index(size));
alloc_ctx = &local_ctx;
@@ -407,10 +407,10 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
if (config_debug) {
rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
rtree_szind_slab_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, true, &szind, &slab);
edata_t *edata = rtree_edata_read(tsdn,
&extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
&emap_global.rtree, rtree_ctx, (uintptr_t)ptr, true);
assert(szind == edata_szind_get(edata));
assert(slab == edata_slab_get(edata));
}

View File

@@ -0,0 +1,33 @@
#ifndef JEMALLOC_INTERNAL_EMAP_H
#define JEMALLOC_INTERNAL_EMAP_H
#include "jemalloc/internal/mutex_pool.h"
#include "jemalloc/internal/rtree.h"
/*
 * An emap ("extent map") bundles the rtree that maps addresses to their
 * edata_t metadata together with the mutex pool used to lock individual
 * edata_t objects.
 */
typedef struct emap_s emap_t;
struct emap_s {
rtree_t rtree;
/* Keyed by the address of the edata_t being protected. */
mutex_pool_t mtx_pool;
};
/* The process-wide emap instance (definition lives in emap.c). */
extern emap_t emap_global;
/*
 * Initialize an emap.  Returns a bool, presumably true on failure per the
 * usual jemalloc convention -- confirm against the emap.c implementation.
 */
bool emap_init(emap_t *emap);
/* Acquire/release the mtx_pool mutex guarding a single edata_t. */
void emap_lock_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
void emap_unlock_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
/*
 * Acquire/release the mutexes guarding two edata_t objects at once.
 * NOTE(review): presumably locks in a canonical order to avoid deadlock when
 * the two edata_t map to different pool mutexes -- verify in emap.c.
 */
void emap_lock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
edata_t *edata2);
void emap_unlock_edata2(tsdn_t *tsdn, emap_t *emap, edata_t *edata1,
edata_t *edata2);
/*
 * Look up the extent containing addr via the rtree and return its edata_t
 * with its lock held (NULL semantics and the meaning of inactive_only are
 * defined by the emap.c implementation -- TODO confirm).
 */
edata_t *emap_lock_edata_from_addr(tsdn_t *tsdn, emap_t *emap,
rtree_ctx_t *rtree_ctx, void *addr, bool inactive_only);
/*
 * Fetch the two rtree leaf elements associated with edata's address range
 * into *r_elm_a and *r_elm_b.  dependent/init_missing follow the rtree
 * lookup API's semantics; returns a bool, presumably true on lookup failure.
 */
bool emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap,
rtree_ctx_t *rtree_ctx, const edata_t *edata, bool dependent,
bool init_missing, rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b);
#endif /* JEMALLOC_INTERNAL_EMAP_H */

View File

@@ -19,8 +19,6 @@
#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
extern size_t opt_lg_extent_max_active_fit;
extern rtree_t extents_rtree;
edata_t *ecache_alloc(tsdn_t *tsdn, arena_t *arena, ehooks_t *ehooks,
ecache_t *ecache, void *new_addr, size_t size, size_t pad, size_t alignment,
bool slab, szind_t szind, bool *zero);

View File

@@ -1,6 +1,7 @@
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/rtree.h"
@@ -81,7 +82,7 @@ iealloc(tsdn_t *tsdn, const void *ptr) {
rtree_ctx_t rtree_ctx_fallback;
rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
return rtree_edata_read(tsdn, &extents_rtree, rtree_ctx,
return rtree_edata_read(tsdn, &emap_global.rtree, rtree_ctx,
(uintptr_t)ptr, true);
}

View File

@@ -45,7 +45,7 @@
#define WITNESS_RANK_EXTENTS 15U
#define WITNESS_RANK_EDATA_CACHE 16U
#define WITNESS_RANK_EXTENT_POOL 17U
#define WITNESS_RANK_EMAP 17U
#define WITNESS_RANK_RTREE 18U
#define WITNESS_RANK_BASE 19U
#define WITNESS_RANK_ARENA_LARGE 20U