Do not assume dss never decreases.

An sbrk() caller outside jemalloc can decrease the dss, so add a
separate atomic boolean to explicitly track whether jemalloc is
concurrently calling sbrk(), rather than depending on state outside
jemalloc's full control.

This resolves #802.
Author: Jason Evans, 2017-05-16 13:25:17 -07:00
parent 067b970130
commit 196a53c2ae

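For readers skimming the change, here is the guard in isolation: a minimal sketch of the acquire/release flag protocol that the new extent_dss_extending_start()/extent_dss_extending_finish() helpers (introduced below) implement, written against standard C11 <stdatomic.h> instead of jemalloc's atomic_b_t/spin_t wrappers. The names and the bare busy-wait are illustrative only; jemalloc backs off via spin_adaptive().

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-in for jemalloc's dss_extending flag. */
    static atomic_bool extending;   /* zero-initialized, i.e. false */

    static void
    extending_start(void) {
        bool expected = false;
        /* Spin until this thread flips the flag from false to true. */
        while (!atomic_compare_exchange_weak_explicit(&extending, &expected,
            true, memory_order_acq_rel, memory_order_relaxed)) {
            expected = false;       /* a failed CAS overwrites expected */
        }
    }

    static void
    extending_finish(void) {
        /* Release store: work done while extending is visible to the next
         * thread that acquires the flag. */
        atomic_store_explicit(&extending, false, memory_order_release);
    }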

@@ -28,6 +28,8 @@ static atomic_u_t dss_prec_default = ATOMIC_INIT(
 /* Base address of the DSS. */
 static void *dss_base;
+/* Atomic boolean indicating whether a thread is currently extending DSS. */
+static atomic_b_t dss_extending;
 /* Atomic boolean indicating whether the DSS is exhausted. */
 static atomic_b_t dss_exhausted;
 /* Atomic current upper limit on DSS addresses. */
 static atomic_p_t dss_max;
@@ -65,37 +67,41 @@ extent_dss_prec_set(dss_prec_t dss_prec) {
 	return false;
 }
 
+static void
+extent_dss_extending_start(void) {
+	spin_t spinner = SPIN_INITIALIZER;
+	while (true) {
+		bool expected = false;
+		if (atomic_compare_exchange_weak_b(&dss_extending, &expected,
+		    true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
+			break;
+		}
+		spin_adaptive(&spinner);
+	}
+}
+
+static void
+extent_dss_extending_finish(void) {
+	assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED));
+
+	atomic_store_b(&dss_extending, false, ATOMIC_RELEASE);
+}
+
 static void *
 extent_dss_max_update(void *new_addr) {
-	void *max_cur;
-
 	/*
 	 * Get the current end of the DSS as max_cur and assure that dss_max is
 	 * up to date.
 	 */
-	spin_t spinner = SPIN_INITIALIZER;
-	while (true) {
-		void *max_prev = atomic_load_p(&dss_max, ATOMIC_RELAXED);
-
-		max_cur = extent_dss_sbrk(0);
-		if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
-			/*
-			 * Another thread optimistically updated dss_max.  Wait
-			 * for it to finish.
-			 */
-			spin_adaptive(&spinner);
-			continue;
-		}
-		if (atomic_compare_exchange_weak_p(&dss_max, &max_prev,
-		    max_cur, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
-			break;
-		}
+	void *max_cur = extent_dss_sbrk(0);
+	if (max_cur == (void *)-1) {
+		return NULL;
 	}
+	atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE);
 
 	/* Fixed new_addr can only be supported if it is at the edge of DSS. */
 	if (new_addr != NULL && max_cur != new_addr) {
 		return NULL;
 	}
 
 	return max_cur;
 }
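The old extent_dss_max_update() needed a CAS-and-retry loop because any thread could be publishing a new dss_max concurrently. With dss_extending serializing the writers, the function can simply read sbrk(0), treat (void *)-1 as failure, and publish the value with one release store. A minimal sketch of that single-writer publication pattern in standard C11 follows; the names below are illustrative, not jemalloc's.

    #include <stdatomic.h>

    static _Atomic(void *) published_max;   /* stands in for dss_max */

    /* Called only while the extending flag is held, so there is exactly one
     * writer at a time and no compare-and-swap is needed. */
    static void
    publish_max(void *new_max) {
        atomic_store_explicit(&published_max, new_max, memory_order_release);
    }

    /* Readers may run concurrently; the acquire load pairs with the release
     * store above. */
    static void *
    read_max(void) {
        return atomic_load_explicit(&published_max, memory_order_acquire);
    }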
@@ -121,6 +127,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 		return NULL;
 	}
 
+	extent_dss_extending_start();
 	if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) {
 		/*
 		 * The loop is necessary to recover from races with other
@@ -168,21 +175,14 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
 			    size);
 
-			/*
-			 * Optimistically update dss_max, and roll back below if
-			 * sbrk() fails.  No other thread will try to extend the
-			 * DSS while dss_max is greater than the current DSS
-			 * max reported by sbrk(0).
-			 */
-			if (!atomic_compare_exchange_weak_p(&dss_max, &max_cur,
-			    dss_next, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) {
-				continue;
-			}
-
 			/* Try to allocate. */
 			void *dss_prev = extent_dss_sbrk(incr);
 			if (dss_prev == max_cur) {
 				/* Success. */
+				atomic_store_p(&dss_max, dss_next,
+				    ATOMIC_RELEASE);
+				extent_dss_extending_finish();
+
 				if (gap_size_page != 0) {
 					extent_dalloc_gap(tsdn, arena, gap);
 				} else {
@@ -209,14 +209,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			}
 			/*
 			 * Failure, whether due to OOM or a race with a raw
-			 * sbrk() call from outside the allocator.  Try to roll
-			 * back optimistic dss_max update; if rollback fails,
-			 * it's due to another caller of this function having
-			 * succeeded since this invocation started, in which
-			 * case rollback is not necessary.
+			 * sbrk() call from outside the allocator.
 			 */
-			atomic_compare_exchange_strong_p(&dss_max, &dss_next,
-			    max_cur, ATOMIC_ACQ_REL, ATOMIC_RELAXED);
 			if (dss_prev == (void *)-1) {
 				/* OOM. */
 				atomic_store_b(&dss_exhausted, true,
@@ -226,6 +220,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 		}
 	}
 label_oom:
+	extent_dss_extending_finish();
 	extent_dalloc(tsdn, arena, gap);
 	return NULL;
 }
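Taken together, the discipline in extent_alloc_dss() is now: take the dss_extending flag before touching the break, advance dss_max only after sbrk(incr) has actually succeeded, and drop the flag on both the success path and the label_oom path. Because dss_max is never updated optimistically, a failed or raced sbrk() leaves nothing to roll back. Below is a rough standalone sketch of that control flow, assuming the extending_start()/extending_finish() helpers from the first sketch above are defined in the same file; sbrk() here is the raw libc call, and the real function additionally manages gap extents, alignment, commit state, and dss_exhausted.

    #define _DEFAULT_SOURCE         /* for sbrk() on glibc */
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>

    static void extending_start(void);      /* from the first sketch */
    static void extending_finish(void);

    static _Atomic(void *) cur_max;         /* stands in for dss_max */

    static void *
    grow_dss(size_t incr) {
        extending_start();              /* exclude other in-process extenders */
        while (1) {
            void *max = sbrk(0);        /* re-read the break; it may have moved */
            if (max == (void *)-1) {
                break;
            }
            atomic_store_explicit(&cur_max, max, memory_order_release);
            void *prev = sbrk((intptr_t)incr);
            if (prev == max) {
                /* Success: publish the new upper limit only now, so a failed
                 * sbrk() never needs to be rolled back. */
                atomic_store_explicit(&cur_max,
                    (void *)((uintptr_t)max + incr), memory_order_release);
                extending_finish();
                return max;
            }
            if (prev == (void *)-1) {
                break;                  /* OOM */
            }
            /* A raw sbrk() caller outside the allocator moved the break
             * (possibly downward); retry with the fresh value. */
        }
        extending_finish();
        return NULL;
    }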
@@ -265,6 +260,7 @@ extent_dss_boot(void) {
 	cassert(have_dss);
 
 	dss_base = extent_dss_sbrk(0);
+	atomic_store_b(&dss_extending, false, ATOMIC_RELAXED);
 	atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED);
 	atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED);
 }