Switch to fine-grained reentrancy support.
Previously, reentrancy was detected and handled generically, at the cost of extra branches and increment / decrement operations on the fast paths. To avoid taxing the fast paths, we move the reentrancy bookkeeping onto tsd slow state, and only modify the reentrancy level around external calls (which might trigger reentrancy).
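For illustration, below is a minimal, self-contained sketch of the bracketing pattern this commit introduces: bump the reentrancy level before an external call that might re-enter the allocator, switch the thread to the slow path, and restore fast-path eligibility once the level drops back to zero. The names demo_tsd_t, demo_slow_update() and external_hook() are hypothetical stand-ins for this sketch only; the real code operates on jemalloc's tsd_t and appears in the diff below.

/*
 * Illustrative sketch only: demo_tsd_t, demo_slow_update() and
 * external_hook() are hypothetical stand-ins, not jemalloc APIs.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int8_t reentrancy_level;	/* 0 means no reentrancy. */
	bool fast;			/* Mirrors tsd_fast(): fast path allowed. */
} demo_tsd_t;

/* Stand-in for tsd_slow_update(): recompute fast-path eligibility. */
static void
demo_slow_update(demo_tsd_t *tsd) {
	tsd->fast = (tsd->reentrancy_level == 0);
}

static void
demo_pre_reentrancy(demo_tsd_t *tsd) {
	bool fast = tsd->fast;
	++tsd->reentrancy_level;
	if (fast) {
		/* First level of reentrancy: force the slow path. */
		demo_slow_update(tsd);
		assert(!tsd->fast);
	}
}

static void
demo_post_reentrancy(demo_tsd_t *tsd) {
	assert(tsd->reentrancy_level > 0);
	if (--tsd->reentrancy_level == 0) {
		/* Back to level 0: the fast path is safe again. */
		demo_slow_update(tsd);
	}
}

/* Hypothetical external call that might re-enter the allocator. */
static void
external_hook(void) {
}

int
main(void) {
	demo_tsd_t tsd = {0, true};

	demo_pre_reentrancy(&tsd);	/* Bracket only the external call... */
	external_hook();
	demo_post_reentrancy(&tsd);	/* ...so fast paths stay untaxed. */

	printf("fast after unwinding: %d\n", (int)tsd.fast);	/* prints 1 */
	return 0;
}

The point, as in the diff that follows, is that the increment/decrement and the branch live only around the (rare) external call, while the common allocation paths just consult the fast/slow state they already track.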
@@ -33,6 +33,8 @@ tcache_t *tcache_get(tsd_t *tsd);
 malloc_cpuid_t malloc_getcpu(void);
 unsigned percpu_arena_choose(void);
 unsigned percpu_arena_ind_limit(void);
+void pre_reentrancy(tsd_t *tsd);
+void post_reentrancy(tsd_t *tsd);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
@@ -445,6 +447,27 @@ tcache_get(tsd_t *tsd) {
 	return tsd_tcachep_get(tsd);
 }
 
+JEMALLOC_INLINE void
+pre_reentrancy(tsd_t *tsd) {
+	bool fast = tsd_fast(tsd);
+	++*tsd_reentrancy_levelp_get(tsd);
+	if (fast) {
+		/* Prepare slow path for reentrancy. */
+		tsd_slow_update(tsd);
+		assert(tsd->state == tsd_state_nominal_slow);
+	}
+}
+
+JEMALLOC_INLINE void
+post_reentrancy(tsd_t *tsd) {
+	int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+	assert(*reentrancy_level > 0);
+	if (--*reentrancy_level == 0) {
+		tsd_slow_update(tsd);
+	}
+}
+
 #endif
 
 #endif /* JEMALLOC_INTERNAL_INLINES_A_H */
@@ -16,7 +16,7 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
 	}
 
 	/* During reentrancy, arena 0 is the safest bet. */
-	if (*tsd_reentrancy_levelp_get(tsd) > 1) {
+	if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) {
 		return arena_get(tsd_tsdn(tsd), 0, true);
 	}
 
@@ -117,8 +117,8 @@ idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
 	if (config_stats && is_internal) {
 		arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr));
 	}
-	if (!is_internal && *tsd_reentrancy_levelp_get(tsdn_tsd(tsdn)) != 0) {
-		tcache = NULL;
+	if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) {
+		assert(tcache == NULL);
 	}
 	arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path);
 }
@@ -20,7 +20,7 @@ tsd_t *tsdn_tsd(tsdn_t *tsdn);
 rtree_ctx_t *tsd_rtree_ctx(tsd_t *tsd);
 rtree_ctx_t *tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback);
 bool tsd_fast(tsd_t *tsd);
-void tsd_assert_fast(tsd_t *tsd);
+bool tsd_assert_fast(tsd_t *tsd);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
@@ -52,9 +52,11 @@ MALLOC_TSD
 #undef MALLOC_TSD_getset_no
 #undef O
 
-JEMALLOC_ALWAYS_INLINE void
+JEMALLOC_ALWAYS_INLINE bool
 tsd_assert_fast(tsd_t *tsd) {
-	assert(!malloc_slow && tsd_tcache_enabled_get(tsd));
+	assert(!malloc_slow && tsd_tcache_enabled_get(tsd) &&
+	    tsd_reentrancy_level_get(tsd) == 0);
+	return true;
 }
 
 JEMALLOC_ALWAYS_INLINE bool
@@ -55,7 +55,7 @@ struct tsd_init_head_s {
 /* O(name, type, [gs]et, init, cleanup) */			\
 	O(tcache_enabled,	bool,		yes,	yes,	no)	\
 	O(arenas_tdata_bypass,	bool,		no,	no,	no)	\
-	O(reentrancy_level,	int8_t,		no,	no,	no)	\
+	O(reentrancy_level,	int8_t,		yes,	no,	no)	\
 	O(narenas_tdata,	uint32_t,	yes,	no,	no)	\
 	O(thread_allocated,	uint64_t,	yes,	no,	no)	\
 	O(thread_deallocated,	uint64_t,	yes,	no,	no)	\