Add init function support to tsd members.

This will facilitate embedding tcache into tsd, which will require proper
initialization that cannot be done via the static initializer.  Make
tsd->rtree_ctx be initialized via rtree_ctx_data_init().
This commit is contained in:
Qi Wang 2017-03-29 13:18:02 -07:00 committed by Qi Wang
parent 5bf800a542
commit 9ed84b0d45
9 changed files with 74 additions and 33 deletions

View File

@ -424,6 +424,7 @@ prof_thread_name_set
psz2ind psz2ind
psz2u psz2u
rtree_clear rtree_clear
rtree_ctx_data_init
rtree_delete rtree_delete
rtree_extent_read rtree_extent_read
rtree_extent_szind_read rtree_extent_szind_read
@ -520,6 +521,7 @@ tsd_booted
tsd_booted_get tsd_booted_get
tsd_cleanup tsd_cleanup
tsd_cleanup_wrapper tsd_cleanup_wrapper
tsd_data_init
tsd_fetch tsd_fetch
tsd_fetch_impl tsd_fetch_impl
tsd_get tsd_get

View File

@ -43,5 +43,6 @@ void rtree_leaf_elm_witness_access(tsdn_t *tsdn, const rtree_t *rtree,
const rtree_leaf_elm_t *elm); const rtree_leaf_elm_t *elm);
void rtree_leaf_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree, void rtree_leaf_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
const rtree_leaf_elm_t *elm); const rtree_leaf_elm_t *elm);
bool rtree_ctx_data_init(rtree_ctx_t *ctx);
#endif /* JEMALLOC_INTERNAL_RTREE_EXTERNS_H */ #endif /* JEMALLOC_INTERNAL_RTREE_EXTERNS_H */

View File

@ -38,6 +38,9 @@ typedef struct rtree_s rtree_t;
# define RTREE_LEAF_COMPACT # define RTREE_LEAF_COMPACT
#endif #endif
/* Needed for initialization only. */
#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
/* /*
* Number of leafkey/leaf pairs to cache. Each entry supports an entire leaf, * Number of leafkey/leaf pairs to cache. Each entry supports an entire leaf,
* so the cache hit rate is typically high even with a small number of entries. * so the cache hit rate is typically high even with a small number of entries.
@ -51,12 +54,13 @@ typedef struct rtree_s rtree_t;
* the tree nodes, and the cache will itself suffer cache misses if made overly * the tree nodes, and the cache will itself suffer cache misses if made overly
* large, not to mention the cost of linear search. * large, not to mention the cost of linear search.
*/ */
#define RTREE_CTX_NCACHE 8 #define RTREE_CTX_NCACHE 8
/* Static initializer for rtree_ctx_t. */ /*
#define RTREE_CTX_INITIALIZER { \ * Zero initializer required for tsd initialization only. Proper initialization
{{0, NULL} /* C initializes all trailing elements to NULL. */} \ * done via rtree_ctx_data_init().
} */
#define RTREE_CTX_ZERO_INITIALIZER {{{0}}}
/* /*
* Maximum number of concurrently acquired elements per thread. This controls * Maximum number of concurrently acquired elements per thread. This controls

View File

@ -14,5 +14,6 @@ void *tsd_init_check_recursion(tsd_init_head_t *head,
void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
#endif #endif
void tsd_cleanup(void *arg); void tsd_cleanup(void *arg);
bool tsd_data_init(void *arg);
#endif /* JEMALLOC_INTERNAL_TSD_EXTERNS_H */ #endif /* JEMALLOC_INTERNAL_TSD_EXTERNS_H */

View File

@ -8,7 +8,7 @@ tsd_t *tsd_fetch_impl(bool init);
tsd_t *tsd_fetch(void); tsd_t *tsd_fetch(void);
tsdn_t *tsd_tsdn(tsd_t *tsd); tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd); bool tsd_nominal(tsd_t *tsd);
#define O(n, t, gs, c) \ #define O(n, t, gs, i, c) \
t *tsd_##n##p_get(tsd_t *tsd); \ t *tsd_##n##p_get(tsd_t *tsd); \
t tsd_##n##_get(tsd_t *tsd); \ t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n); void tsd_##n##_set(tsd_t *tsd, t n);
@ -39,9 +39,11 @@ tsd_fetch_impl(bool init) {
tsd->state = tsd_state_nominal; tsd->state = tsd_state_nominal;
/* Trigger cleanup handler registration. */ /* Trigger cleanup handler registration. */
tsd_set(tsd); tsd_set(tsd);
tsd_data_init(tsd);
} else if (tsd->state == tsd_state_purgatory) { } else if (tsd->state == tsd_state_purgatory) {
tsd->state = tsd_state_reincarnated; tsd->state = tsd_state_reincarnated;
tsd_set(tsd); tsd_set(tsd);
tsd_data_init(tsd);
} else { } else {
assert(tsd->state == tsd_state_reincarnated); assert(tsd->state == tsd_state_reincarnated);
} }
@ -76,7 +78,7 @@ tsd_##n##_set(tsd_t *tsd, t n) { \
tsd->n = n; \ tsd->n = n; \
} }
#define MALLOC_TSD_getset_no(n, t) #define MALLOC_TSD_getset_no(n, t)
#define O(n, t, gs, c) \ #define O(n, t, gs, i, c) \
JEMALLOC_ALWAYS_INLINE t * \ JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get(tsd_t *tsd) { \ tsd_##n##p_get(tsd_t *tsd) { \
return &tsd->n; \ return &tsd->n; \
@ -121,8 +123,7 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
* return a pointer to it. * return a pointer to it.
*/ */
if (unlikely(tsdn_null(tsdn))) { if (unlikely(tsdn_null(tsdn))) {
static const rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; rtree_ctx_data_init(fallback);
memcpy(fallback, &rtree_ctx, sizeof(rtree_ctx_t));
return fallback; return fallback;
} }
return tsd_rtree_ctx(tsdn_tsd(tsdn)); return tsd_rtree_ctx(tsdn_tsd(tsdn));

View File

@ -15,23 +15,23 @@ struct tsd_init_head_s {
#endif #endif
#define MALLOC_TSD \ #define MALLOC_TSD \
/* O(name, type, [gs]et, cleanup) */ \ /* O(name, type, [gs]et, init, cleanup) */ \
O(tcache, tcache_t *, yes, yes) \ O(tcache, tcache_t *, yes, no, yes) \
O(thread_allocated, uint64_t, yes, no) \ O(thread_allocated, uint64_t, yes, no, no) \
O(thread_deallocated, uint64_t, yes, no) \ O(thread_deallocated, uint64_t, yes, no, no) \
O(prof_tdata, prof_tdata_t *, yes, yes) \ O(prof_tdata, prof_tdata_t *, yes, no, yes) \
O(iarena, arena_t *, yes, yes) \ O(iarena, arena_t *, yes, no, yes) \
O(arena, arena_t *, yes, yes) \ O(arena, arena_t *, yes, no, yes) \
O(arenas_tdata, arena_tdata_t *,yes, yes) \ O(arenas_tdata, arena_tdata_t *,yes, no, yes) \
O(narenas_tdata, unsigned, yes, no) \ O(narenas_tdata, unsigned, yes, no, no) \
O(arenas_tdata_bypass, bool, no, no) \ O(arenas_tdata_bypass, bool, no, no, no) \
O(tcache_enabled, tcache_enabled_t, \ O(tcache_enabled, tcache_enabled_t, \
yes, no) \ yes, no, no) \
O(rtree_ctx, rtree_ctx_t, no, no) \ O(rtree_ctx, rtree_ctx_t, no, yes, no) \
O(witnesses, witness_list_t, no, yes) \ O(witnesses, witness_list_t, no, no, yes) \
O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \ O(rtree_leaf_elm_witnesses, rtree_leaf_elm_witness_tsd_t, \
no, no) \ no, no, no) \
O(witness_fork, bool, yes, no) \ O(witness_fork, bool, yes, no, no)
#define TSD_INITIALIZER { \ #define TSD_INITIALIZER { \
tsd_state_uninitialized, \ tsd_state_uninitialized, \
@ -45,7 +45,7 @@ struct tsd_init_head_s {
0, \ 0, \
false, \ false, \
tcache_enabled_default, \ tcache_enabled_default, \
RTREE_CTX_INITIALIZER, \ RTREE_CTX_ZERO_INITIALIZER, \
ql_head_initializer(witnesses), \ ql_head_initializer(witnesses), \
RTREE_ELM_WITNESS_TSD_INITIALIZER, \ RTREE_ELM_WITNESS_TSD_INITIALIZER, \
false \ false \
@ -53,7 +53,7 @@ struct tsd_init_head_s {
struct tsd_s { struct tsd_s {
tsd_state_t state; tsd_state_t state;
#define O(n, t, gs, c) \ #define O(n, t, gs, i, c) \
t n; t n;
MALLOC_TSD MALLOC_TSD
#undef O #undef O

View File

@ -424,3 +424,14 @@ rtree_leaf_elm_witness_release(tsdn_t *tsdn, const rtree_t *rtree,
witness_unlock(tsdn, witness); witness_unlock(tsdn, witness);
rtree_leaf_elm_witness_dalloc(tsdn_tsd(tsdn), witness, elm); rtree_leaf_elm_witness_dalloc(tsdn_tsd(tsdn), witness, elm);
} }
/*
 * Reset every rtree_ctx cache slot to an invalid leafkey with no cached
 * leaf, so lookups miss until real entries are installed.  Returns false
 * (no error) to match the tsd member init-hook signature.
 */
bool
rtree_ctx_data_init(rtree_ctx_t *ctx) {
	rtree_ctx_cache_elm_t *elm = &ctx->cache[0];
	rtree_ctx_cache_elm_t *end = elm + RTREE_CTX_NCACHE;

	while (elm != end) {
		elm->leafkey = RTREE_LEAFKEY_INVALID;
		elm->leaf = NULL;
		elm++;
	}
	return false;
}

View File

@ -60,6 +60,23 @@ malloc_tsd_cleanup_register(bool (*f)(void)) {
ncleanups++; ncleanups++;
} }
/*
 * Initialize the tsd members whose MALLOC_TSD "init" column is "yes"
 * (currently rtree_ctx, via rtree_ctx_data_init()).  Called from
 * tsd_fetch_impl() after a tsd transitions to the nominal or reincarnated
 * state.  Returns true if any member's initializer fails, false otherwise.
 */
bool
tsd_data_init(void *arg) {
	tsd_t *tsd = (tsd_t *)arg;
/*
 * Expand each O(name, type, gs, init, cleanup) entry: members marked
 * init == yes get a call to <name>_data_init(); init == no expands to
 * nothing.  The ##i token-paste selects between the two helper macros.
 */
#define MALLOC_TSD_init_yes(n, t) \
	if (n##_data_init(&tsd->n)) { \
		return true; \
	}
#define MALLOC_TSD_init_no(n, t)
#define O(n, t, gs, i, c) \
	MALLOC_TSD_init_##i(n, t)
MALLOC_TSD
#undef MALLOC_TSD_init_yes
#undef MALLOC_TSD_init_no
#undef O
	return false;
}
void void
tsd_cleanup(void *arg) { tsd_cleanup(void *arg) {
tsd_t *tsd = (tsd_t *)arg; tsd_t *tsd = (tsd_t *)arg;
@ -72,7 +89,7 @@ tsd_cleanup(void *arg) {
#define MALLOC_TSD_cleanup_yes(n, t) \ #define MALLOC_TSD_cleanup_yes(n, t) \
n##_cleanup(tsd); n##_cleanup(tsd);
#define MALLOC_TSD_cleanup_no(n, t) #define MALLOC_TSD_cleanup_no(n, t)
#define O(n, t, gs, c) \ #define O(n, t, gs, i, c) \
MALLOC_TSD_cleanup_##c(n, t) MALLOC_TSD_cleanup_##c(n, t)
MALLOC_TSD MALLOC_TSD
#undef MALLOC_TSD_cleanup_yes #undef MALLOC_TSD_cleanup_yes

View File

@ -68,7 +68,8 @@ TEST_BEGIN(test_rtree_read_empty) {
tsdn = tsdn_fetch(); tsdn = tsdn_fetch();
rtree_t *rtree = &test_rtree; rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE, assert_ptr_null(rtree_extent_read(tsdn, rtree, &rtree_ctx, PAGE,
false), "rtree_extent_read() should return NULL for empty tree"); false), "rtree_extent_read() should return NULL for empty tree");
@ -89,7 +90,8 @@ typedef struct {
static void * static void *
thd_start(void *varg) { thd_start(void *varg) {
thd_start_arg_t *arg = (thd_start_arg_t *)varg; thd_start_arg_t *arg = (thd_start_arg_t *)varg;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
sfmt_t *sfmt; sfmt_t *sfmt;
extent_t *extent; extent_t *extent;
tsdn_t *tsdn; tsdn_t *tsdn;
@ -173,7 +175,8 @@ TEST_BEGIN(test_rtree_extrema) {
tsdn_t *tsdn = tsdn_fetch(); tsdn_t *tsdn = tsdn_fetch();
rtree_t *rtree = &test_rtree; rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a, assert_false(rtree_write(tsdn, rtree, &rtree_ctx, PAGE, &extent_a,
@ -207,8 +210,8 @@ TEST_BEGIN(test_rtree_bits) {
extent_state_active, false, false); extent_state_active, false, false);
rtree_t *rtree = &test_rtree; rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure"); assert_false(rtree_new(rtree, false), "Unexpected rtree_new() failure");
for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) { for (unsigned i = 0; i < sizeof(keys)/sizeof(uintptr_t); i++) {
@ -240,7 +243,8 @@ TEST_BEGIN(test_rtree_random) {
tsdn_t *tsdn = tsdn_fetch(); tsdn_t *tsdn = tsdn_fetch();
uintptr_t keys[NSET]; uintptr_t keys[NSET];
rtree_t *rtree = &test_rtree; rtree_t *rtree = &test_rtree;
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER; rtree_ctx_t rtree_ctx;
rtree_ctx_data_init(&rtree_ctx);
extent_t extent; extent_t extent;
extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0, extent_init(&extent, NULL, NULL, 0, false, NSIZES, 0,