Protect the rtree/extent interactions with a mutex pool.
Instead of embedding a lock bit in rtree leaf elements, we associate extents with a small set of mutexes. This gets us two things:

- We can use the system mutexes. This (hypothetically) protects us from priority inversion, and lets us stop doing a backoff/sleep loop, instead opting for precise wakeups from the mutex.
- It cuts down on the number of mutex acquisitions we have to do (from four in the worst case to two).

We end up simplifying most of the rtree code (which no longer has to deal with locking or concurrency at all), at the cost of additional complexity in the extent code: since the mutex protecting the rtree leaf elements is determined by reading the extent out of those elements, the initial read is racy, so we may acquire an out-of-date mutex. We re-check the extent in the leaf after acquiring the mutex to protect us from this race.
Committed by: David Goldblatt
Parent: 26c792e61a
Commit: 3f685e8824
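For context, the following is a minimal, self-contained sketch of the lock-then-recheck pattern the commit message describes. The names (pool_mutex_for, leaf_extent_lock), the pool size, and the pointer hash are hypothetical stand-ins, not jemalloc's actual mutex_pool API; the real code also manages szind/slab state and uses jemalloc's own mutex and atomic wrappers.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for jemalloc's internal types. */
typedef struct extent_s { int dummy; } extent_t;
typedef struct { _Atomic(extent_t *) le_extent; } rtree_leaf_elm_t;

/* A small, fixed pool of mutexes shared by all extents. */
#define POOL_NMTX 256
static pthread_mutex_t pool[POOL_NMTX];

static void
pool_init(void) {
	/* Call once at startup, before any locking. */
	for (size_t i = 0; i < POOL_NMTX; i++) {
		pthread_mutex_init(&pool[i], NULL);
	}
}

static pthread_mutex_t *
pool_mutex_for(const extent_t *extent) {
	/*
	 * Cheap pointer hash: extents are well aligned, so fold in some
	 * higher-order bits rather than relying on the (zero) low bits.
	 */
	uintptr_t h = (uintptr_t)extent;
	h = (h >> 4) ^ (h >> 16);
	return &pool[h % POOL_NMTX];
}

/*
 * Lock the extent currently stored in a leaf element.  The mutex is chosen
 * from a racy read of the leaf, so after acquiring it we re-read the leaf;
 * if the extent changed underneath us, we locked an out-of-date mutex and
 * must retry.
 */
static extent_t *
leaf_extent_lock(rtree_leaf_elm_t *elm) {
	for (;;) {
		extent_t *extent = atomic_load_explicit(&elm->le_extent,
		    memory_order_acquire);
		if (extent == NULL) {
			return NULL;
		}
		pthread_mutex_t *mtx = pool_mutex_for(extent);
		pthread_mutex_lock(mtx);
		if (atomic_load_explicit(&elm->le_extent,
		    memory_order_acquire) == extent) {
			/* Still the same extent; the caller unlocks mtx. */
			return extent;
		}
		/* Stale read; drop the wrong mutex and try again. */
		pthread_mutex_unlock(mtx);
	}
}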
@@ -2,6 +2,7 @@
 #define JEMALLOC_INTERNAL_RTREE_STRUCTS_H
 
 #include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/mutex_pool_structs.h"
 
 struct rtree_node_elm_s {
 	atomic_p_t	child;	/* (rtree_{node,leaf}_elm_t *) */
@@ -18,13 +19,12 @@ struct rtree_leaf_elm_s {
 	 * x: index
 	 * e: extent
 	 * b: slab
-	 * k: lock
 	 *
-	 * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee00bk
+	 * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
 	 */
 	atomic_p_t	le_bits;
 #else
-	atomic_p_t	le_extent;	/* (extent_t *), lock in low bit */
+	atomic_p_t	le_extent;	/* (extent_t *) */
 	atomic_u_t	le_szind;	/* (szind_t) */
 	atomic_b_t	le_slab;	/* (bool) */
 #endif
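The compact-leaf branch above packs the index byte, the extent pointer, and the slab flag into the single le_bits word using the layout in the comment; removing the lock bit is what frees up bit 0's neighbor to be zero. A rough illustration of that packing, with hypothetical helper names and assuming 64-bit pointers whose upper 16 bits are unused (these are not jemalloc's actual accessors):

#include <stdbool.h>
#include <stdint.h>

typedef struct extent_s extent_t;

/* Mask for the low 48 pointer bits; assumes a 48-bit virtual address space. */
#define PTR_MASK (((uintptr_t)1 << 48) - 1)

/*
 * Pack per the layout  00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b:
 * the index byte lives above the pointer bits, and the slab flag sits in
 * bit 0, which the extent's alignment guarantees is otherwise zero.
 */
static inline uintptr_t
leaf_bits_pack(extent_t *extent, unsigned index, bool slab) {
	return ((uintptr_t)index << 48)
	    | ((uintptr_t)extent & PTR_MASK)
	    | (uintptr_t)slab;
}

static inline extent_t *
leaf_bits_extent_get(uintptr_t bits) {
	/* Drop the index byte and the slab bit. */
	return (extent_t *)(bits & PTR_MASK & ~(uintptr_t)1);
}

static inline unsigned
leaf_bits_index_get(uintptr_t bits) {
	return (unsigned)(bits >> 48);
}

static inline bool
leaf_bits_slab_get(uintptr_t bits) {
	return (bool)(bits & 1);
}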