Fix extent_alloc_dss() regression.

Fix extent_alloc_dss() to account for bytes that are not a multiple of
the page size.  This regression was introduced by
577d4572b0 (Make dss operations
lockless.), which was first released in 4.3.0.
Jason Evans 2017-02-09 13:00:59 -08:00
parent 6b8ef771a9
commit cd2501efd6
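
To make the regression concrete, here is a minimal standalone sketch of the increment arithmetic this commit changes. This is not jemalloc code: PAGE, PAGE_CEILING, and ALIGNMENT_CEILING below are simplified stand-ins for the jemalloc macros, and the break address is made up. When the current break is not page-aligned, an increment computed from the page-aligned gap alone leaves sbrk short of the requested allocation by the subpage bytes; the fixed increment measures the gap from the break itself.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)
#define PAGE_CEILING(a) (((a) + (PAGE - 1)) & ~(PAGE - 1))
#define ALIGNMENT_CEILING(a, align) \
	(((a) + ((uintptr_t)(align) - 1)) & ~((uintptr_t)(align) - 1))

int main(void) {
	/* Current sbrk break (dss_max); deliberately not page-aligned. */
	uintptr_t max_cur = 0x1000123;
	size_t size = 2 * PAGE;		/* requested allocation size */
	size_t alignment = PAGE;

	/* Page-aligned gap start and aligned allocation address. */
	uintptr_t gap_addr_page = PAGE_CEILING(max_cur);
	uintptr_t ret = ALIGNMENT_CEILING(gap_addr_page, alignment);

	/* Pre-fix increment: counts only the page-aligned gap. */
	size_t gap_size_page = ret - gap_addr_page;
	intptr_t incr_old = gap_size_page + size;

	/* Post-fix increment: also covers the subpage bytes below the gap. */
	size_t gap_size_subpage = ret - max_cur;
	intptr_t incr_new = gap_size_subpage + size;

	/* Only the fixed increment moves the break to the end of the allocation. */
	assert(max_cur + incr_new == ret + size);
	assert(max_cur + incr_old < ret + size);

	printf("pre-fix sbrk increment is short by %zu bytes\n",
	    (size_t)(incr_new - incr_old));
	return 0;
}

Running the sketch prints the number of subpage bytes the pre-fix increment misses; for this made-up break it is 0xEDD (3805) bytes.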


@@ -121,35 +121,45 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 		 * malloc.
 		 */
 		while (true) {
-			void *ret, *max_cur, *gap_addr, *dss_next, *dss_prev;
-			size_t gap_size;
-			intptr_t incr;
-
-			max_cur = extent_dss_max_update(new_addr);
+			void *max_cur = extent_dss_max_update(new_addr);
 			if (max_cur == NULL) {
 				goto label_oom;
 			}
 
 			/*
-			 * Compute how much gap space (if any) is necessary to
-			 * satisfy alignment.  This space can be recycled for
-			 * later use.
+			 * Compute how much page-aligned gap space (if any) is
+			 * necessary to satisfy alignment.  This space can be
+			 * recycled for later use.
 			 */
-			gap_addr = (void *)(PAGE_CEILING((uintptr_t)max_cur));
-			ret = (void *)ALIGNMENT_CEILING((uintptr_t)gap_addr,
-			    PAGE_CEILING(alignment));
-			gap_size = (uintptr_t)ret - (uintptr_t)gap_addr;
-			if (gap_size != 0) {
-				extent_init(gap, arena, gap_addr, gap_size,
-				    gap_size, arena_extent_sn_next(arena),
+			void *gap_addr_page = (void *)(PAGE_CEILING(
+			    (uintptr_t)max_cur));
+			void *ret = (void *)ALIGNMENT_CEILING(
+			    (uintptr_t)gap_addr_page, alignment);
+			size_t gap_size_page = (uintptr_t)ret -
+			    (uintptr_t)gap_addr_page;
+			if (gap_size_page != 0) {
+				extent_init(gap, arena, gap_addr_page,
+				    gap_size_page, gap_size_page,
+				    arena_extent_sn_next(arena),
 				    extent_state_active, false, true, false);
 			}
-			dss_next = (void *)((uintptr_t)ret + size);
+			/*
+			 * Compute the address just past the end of the desired
+			 * allocation space.
+			 */
+			void *dss_next = (void *)((uintptr_t)ret + size);
 			if ((uintptr_t)ret < (uintptr_t)max_cur ||
 			    (uintptr_t)dss_next < (uintptr_t)max_cur) {
 				goto label_oom; /* Wrap-around. */
 			}
-			incr = gap_size + size;
+			/* Compute the increment, including subpage bytes. */
+			void *gap_addr_subpage = max_cur;
+			size_t gap_size_subpage = (uintptr_t)ret -
+			    (uintptr_t)gap_addr_subpage;
+			intptr_t incr = gap_size_subpage + size;
+
+			assert((uintptr_t)max_cur + incr == (uintptr_t)ret +
+			    size);
 
 			/*
 			 * Optimistically update dss_max, and roll back below if
@@ -162,10 +172,10 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			}
 
 			/* Try to allocate. */
-			dss_prev = extent_dss_sbrk(incr);
+			void *dss_prev = extent_dss_sbrk(incr);
 			if (dss_prev == max_cur) {
 				/* Success. */
-				if (gap_size != 0) {
+				if (gap_size_page != 0) {
 					extent_dalloc_gap(tsdn, arena, gap);
 				} else {
 					extent_dalloc(tsdn, arena, gap);