Fix base allocator THP auto mode locking and stats.

Added proper synchronization for switching to THP in auto mode.  Also
fixed the stats for the number of THPs used.
Qi Wang 2017-11-07 19:40:38 -08:00 committed by Qi Wang
parent b5d071c266
commit cb3b72b975
2 changed files with 18 additions and 21 deletions

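The race being fixed: in auto mode, concurrent base_block_alloc() calls could each observe the block-count threshold, repeat the switch, and double-count THP stats. Below is a minimal, self-contained model of the pattern the commit adopts (pthreads stand in for jemalloc's malloc_mutex_t; the names here are illustrative, not jemalloc's API):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	pthread_mutex_t mtx;		/* plays the role of base->mtx */
	bool auto_thp_switched;		/* latched once, only under mtx */
} base_model_t;

/* Caller must hold mtx; calls after the switch are no-ops. */
static void
auto_thp_switch(base_model_t *b) {
	if (b->auto_thp_switched) {
		return;
	}
	/* ... check the block-count threshold here ... */
	b->auto_thp_switched = true;
	/* ... madvise existing blocks and bump n_thp exactly once ... */
}

/* Called whenever a new base block is mapped. */
static void
on_new_block(base_model_t *b, void *addr, size_t size) {
	pthread_mutex_lock(&b->mtx);
	auto_thp_switch(b);
	if (b->auto_thp_switched) {
		/* pages_huge(addr, size): new block joins the THP regime. */
	}
	pthread_mutex_unlock(&b->mtx);
	(void)addr; (void)size;
}

Because the flag is only read and written with the mutex held, a plain bool suffices and no atomics are needed.
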
include/jemalloc/internal/base_structs.h

@@ -30,6 +30,8 @@ struct base_s {
 	/* Protects base_alloc() and base_stats_get() operations. */
 	malloc_mutex_t mtx;
 
+	/* Using THP when true (metadata_thp auto mode). */
+	bool auto_thp_switched;
 	/*
 	 * Most recent size class in the series of increasingly large base
 	 * extents.  Logarithmic spacing between subsequent allocations ensures

src/base.c

@@ -139,23 +139,13 @@ base_get_num_blocks(base_t *base, bool with_new_block) {
 	return n_blocks;
 }
 
-static bool
-base_auto_thp_triggered(base_t *base, bool with_new_block) {
-	assert(opt_metadata_thp == metadata_thp_auto);
-
-	if (base_ind_get(base) != 0) {
-		return base_get_num_blocks(base, with_new_block) >=
-		    BASE_AUTO_THP_THRESHOLD;
-	}
-
-	return base_get_num_blocks(base, with_new_block) >=
-	    BASE_AUTO_THP_THRESHOLD_A0;
-}
-
 static void
-base_auto_thp_switch(base_t *base) {
+base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
 	assert(opt_metadata_thp == metadata_thp_auto);
+	malloc_mutex_assert_owner(tsdn, &base->mtx);
+	if (base->auto_thp_switched) {
+		return;
+	}
 	/* Called when adding a new block. */
 	bool should_switch;
 	if (base_ind_get(base) != 0) {
@@ -169,14 +159,16 @@ base_auto_thp_switch(base_t *base) {
 		return;
 	}
 
-	assert(base->n_thp == 0);
+	base->auto_thp_switched = true;
+	assert(!config_stats || base->n_thp == 0);
 	/* Make the initial blocks THP lazily. */
 	base_block_t *block = base->blocks;
 	while (block != NULL) {
 		assert((block->size & HUGEPAGE_MASK) == 0);
 		pages_huge(block, block->size);
 		if (config_stats) {
-			base->n_thp += block->size >> LG_HUGEPAGE;
+			base->n_thp += HUGEPAGE_CEILING(block->size -
+			    extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
 		}
 		block = block->next;
 		assert(block == NULL || (base_ind_get(base) == 0));
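
A worked example of the stats fix above (numbers hypothetical, assuming 2 MiB huge pages). extent_bsize_get(&block->extent) is the still-unused remainder of the block, so block->size minus it is the prefix actually handed out; the old code charged every page of the block, the new code only the pages backing that used prefix:

#include <stdio.h>
#include <stddef.h>

#define LG_HUGEPAGE	21	/* 2 MiB huge pages (platform-dependent). */
#define HUGEPAGE	((size_t)1 << LG_HUGEPAGE)
#define HUGEPAGE_CEILING(s)	(((s) + HUGEPAGE - 1) & ~(HUGEPAGE - 1))

int
main(void) {
	size_t block_size = 4 * HUGEPAGE;	/* 8 MiB block. */
	size_t bsize = 5 * HUGEPAGE / 2;	/* 5 MiB still unallocated. */

	/* Old accounting: every page of the block, used or not. */
	size_t old_thp = block_size >> LG_HUGEPAGE;		/* 4 */
	/* New accounting: only pages backing the used prefix. */
	size_t new_thp = HUGEPAGE_CEILING(block_size - bsize)
	    >> LG_HUGEPAGE;					/* 2 */

	printf("old=%zu new=%zu\n", old_thp, new_thp);
	return 0;
}
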
@@ -226,7 +218,7 @@ base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
 		assert(base->allocated <= base->resident);
 		assert(base->resident <= base->mapped);
 		if (metadata_thp_madvise() && (opt_metadata_thp ==
-		    metadata_thp_always || base_auto_thp_triggered(base, false))) {
+		    metadata_thp_always || base->auto_thp_switched)) {
 			base->n_thp += (HUGEPAGE_CEILING((uintptr_t)addr + size)
 			    - HUGEPAGE_CEILING((uintptr_t)addr - gap_size)) >>
 			    LG_HUGEPAGE;
@@ -289,10 +281,12 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
 		} else if (opt_metadata_thp == metadata_thp_auto &&
 		    base != NULL) {
 			/* base != NULL indicates this is not a new base. */
-			if (base_auto_thp_triggered(base, true)) {
+			malloc_mutex_lock(tsdn, &base->mtx);
+			base_auto_thp_switch(tsdn, base);
+			if (base->auto_thp_switched) {
 				pages_huge(addr, block_size);
 			}
-			base_auto_thp_switch(base);
+			malloc_mutex_unlock(tsdn, &base->mtx);
 		}
 	}
 
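
For context on pages_huge(): it hands the range to the kernel's transparent huge page machinery. A simplified sketch of such a wrapper (jemalloc's actual implementation lives in src/pages.c and differs in asserts and build-time gating):

#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical simplified wrapper; returns true on failure, matching
 * jemalloc's convention of false meaning success. */
static bool
pages_huge_sketch(void *addr, size_t size) {
#ifdef MADV_HUGEPAGE
	/* Advise the kernel to back [addr, addr+size) with THPs; the
	 * hint is honored lazily and may be ignored (e.g. THP disabled). */
	return madvise(addr, size, MADV_HUGEPAGE) != 0;
#else
	(void)addr; (void)size;
	return true;	/* THP madvise unavailable on this platform. */
#endif
}
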
@@ -334,7 +328,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
 		base->mapped += block->size;
 		if (metadata_thp_madvise() &&
 		    !(opt_metadata_thp == metadata_thp_auto
-		    && !base_auto_thp_triggered(base, false))) {
+		    && !base->auto_thp_switched)) {
 			assert(base->n_thp > 0);
 			base->n_thp += HUGEPAGE_CEILING(sizeof(base_block_t)) >>
 			    LG_HUGEPAGE;
@@ -376,6 +370,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
 	base->pind_last = pind_last;
 	base->extent_sn_next = extent_sn_next;
 	base->blocks = block;
+	base->auto_thp_switched = false;
 	for (szind_t i = 0; i < NSIZES; i++) {
 		extent_heap_new(&base->avail[i]);
 	}
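
With stats enabled, the counter this commit corrects should be observable from application code. A usage sketch, assuming the stats.metadata_thp mallctl exposed by jemalloc builds of this era configured with --enable-stats:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
	/* Advance the epoch so cached stats are refreshed. */
	uint64_t epoch = 1;
	size_t len = sizeof(epoch);
	mallctl("epoch", &epoch, &len, &epoch, sizeof(epoch));

	/* Number of transparent huge pages backing allocator metadata. */
	size_t thp;
	len = sizeof(thp);
	if (mallctl("stats.metadata_thp", &thp, &len, NULL, 0) == 0) {
		printf("metadata THPs: %zu\n", thp);
	}
	return 0;
}
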