Convert extents_t's npages field to use C11-style atomics

In the process, we can do some strength reduction, changing the fetch-adds and
fetch-subs to be simple loads followed by stores, since the modifications all
occur while holding the mutex.
Authored by David Goldblatt on 2017-03-07 17:57:48 -08:00; committed by David Goldblatt
parent dafadce622
commit 8adab26972
2 changed files with 28 additions and 8 deletions
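
For readers unfamiliar with the trick the commit message describes, here is a minimal standalone sketch of the pattern in plain C11, using <stdatomic.h> and a pthread mutex instead of jemalloc's atomic_zu_t and malloc_mutex_t wrappers; the page_counter_t type and its functions are illustrative names, not part of jemalloc.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

typedef struct {
        pthread_mutex_t mtx;
        /* Written only with mtx held; may be read without it. */
        atomic_size_t npages;
} page_counter_t;

void
page_counter_add(page_counter_t *pc, size_t npages) {
        pthread_mutex_lock(&pc->mtx);
        /*
         * Every writer holds mtx, so no other store can slip in between the
         * load and the store below; a relaxed load/store pair is enough, and
         * it is cheaper than the atomic read-modify-write a fetch-add does.
         */
        size_t cur = atomic_load_explicit(&pc->npages, memory_order_relaxed);
        atomic_store_explicit(&pc->npages, cur + npages, memory_order_relaxed);
        pthread_mutex_unlock(&pc->mtx);
}

size_t
page_counter_get(page_counter_t *pc) {
        /* Readers may skip the mutex; they get some valid count, nothing more. */
        return atomic_load_explicit(&pc->npages, memory_order_relaxed);
}

Note that the reader still needs an atomic load: a plain size_t read racing with the locked writer would be a data race, which is why the field's type changes from size_t to atomic_zu_t below even though the writes themselves are serialized by the mutex.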

@@ -109,9 +109,12 @@ struct extents_s {
         /*
          * Page sum for all extents in heaps.
          *
-         * Synchronization: atomic.
+         * The synchronization here is a little tricky. Modifications to npages
+         * must hold mtx, but reads need not (though, a reader who sees npages
+         * without holding the mutex can't assume anything about the rest of the
+         * state of the extents_t).
          */
-        size_t npages;
+        atomic_zu_t npages;
 
         /* All stored extents must be in the same state. */
         extent_state_t state;

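The comment added in this hunk is the key invariant. Below is a small, self-contained demo of what it does and does not allow, again using made-up names (demo_extents_t) rather than jemalloc's types: a writer thread updates both the mutex-guarded state and npages while holding the lock, and main() reads npages without taking it.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for extents_t: an atomic counter next to mutex-guarded state. */
typedef struct {
        pthread_mutex_t mtx;
        atomic_size_t npages;   /* atomic: may be read without mtx */
        size_t nextents;        /* plain: only touched with mtx held */
} demo_extents_t;

static demo_extents_t ex = {
        .mtx = PTHREAD_MUTEX_INITIALIZER,
        .npages = 0,
        .nextents = 0,
};

static void *
writer(void *arg) {
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                pthread_mutex_lock(&ex.mtx);
                ex.nextents++;
                size_t cur = atomic_load_explicit(&ex.npages,
                    memory_order_relaxed);
                atomic_store_explicit(&ex.npages, cur + 4,
                    memory_order_relaxed);
                pthread_mutex_unlock(&ex.mtx);
        }
        return NULL;
}

int
main(void) {
        pthread_t t;
        pthread_create(&t, NULL, writer, NULL);
        /*
         * Unlocked read: always some value npages actually held, never a
         * torn one.  It says nothing about nextents or any other field;
         * to reason about those, this thread would have to take ex.mtx.
         */
        size_t snapshot = atomic_load_explicit(&ex.npages,
            memory_order_relaxed);
        printf("npages snapshot: %zu\n", snapshot);
        pthread_join(t, NULL);
        return 0;
}

Build with -pthread; the snapshot printed is always a value the writer actually published (a multiple of 4), never a torn intermediate.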

@@ -186,7 +186,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
                 extent_heap_new(&extents->heaps[i]);
         }
         extent_list_init(&extents->lru);
-        extents->npages = 0;
+        atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
         extents->state = state;
         extents->delay_coalesce = delay_coalesce;
         return false;
@@ -199,7 +199,7 @@ extents_state_get(const extents_t *extents) {
 
 size_t
 extents_npages_get(extents_t *extents) {
-        return atomic_read_zu(&extents->npages);
+        return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
 }
 
 static void
@@ -216,7 +216,15 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
                 extent_list_append(&extents->lru, extent);
         }
         size_t npages = size >> LG_PAGE;
-        atomic_add_zu(&extents->npages, npages);
+        /*
+         * All modifications to npages hold the mutex (as asserted above), so we
+         * don't need an atomic fetch-add; we can get by with a load followed by
+         * a store.
+         */
+        size_t cur_extents_npages =
+            atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+        atomic_store_zu(&extents->npages, cur_extents_npages + npages,
+            ATOMIC_RELAXED);
 }
 
 static void
@@ -233,8 +241,15 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
                 extent_list_remove(&extents->lru, extent);
         }
         size_t npages = size >> LG_PAGE;
-        assert(atomic_read_zu(&extents->npages) >= npages);
-        atomic_sub_zu(&extents->npages, size >> LG_PAGE);
+        /*
+         * As in extents_insert_locked, we hold extents->mtx and so don't need
+         * atomic operations for updating extents->npages.
+         */
+        size_t cur_extents_npages =
+            atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+        assert(cur_extents_npages >= npages);
+        atomic_store_zu(&extents->npages,
+            cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
 }
 
 /*
@@ -299,7 +314,9 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
         }
         /* Check the eviction limit. */
         size_t npages = extent_size_get(extent) >> LG_PAGE;
-        if (atomic_read_zu(&extents->npages) - npages < npages_min) {
+        size_t extents_npages = atomic_load_zu(&extents->npages,
+            ATOMIC_RELAXED);
+        if (extents_npages - npages < npages_min) {
                 extent = NULL;
                 goto label_return;
         }
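
The extents_evict hunk reads the counter once into a local and then does all of its arithmetic on that snapshot. Here is a small sketch of that shape with hypothetical names (would_evict_below_min, total); when the caller holds the lock that serializes the writers, as the locked helpers above do, the comparison is exact, and otherwise it is only a best-effort heuristic.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/*
 * Hypothetical helper in the shape of the eviction-limit check above:
 * would removing npages pages drop the total below npages_min?
 */
bool
would_evict_below_min(atomic_size_t *total, size_t npages, size_t npages_min) {
        /* One relaxed load; everything below works on this snapshot. */
        size_t cur = atomic_load_explicit(total, memory_order_relaxed);
        /*
         * Assumes cur >= npages (the pages being considered are still counted
         * in the total), mirroring the assertion in extents_remove_locked;
         * otherwise the unsigned subtraction would wrap.
         */
        assert(cur >= npages);
        return cur - npages < npages_min;
}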