Fix extent_recycle()'s cache-oblivious padding support.
Add padding *after* computing the size class, so that the optimal size
class isn't skipped during search for a usable extent. This regression
was caused by b46261d58b (Implement cache-oblivious support for huge
size classes.).
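The ordering bug is easiest to see with a toy size-class function. The sketch below is illustrative only: toy_s2u() is a hypothetical stand-in for jemalloc's s2u() (which rounds a request up to a supported size class), and the power-of-two classes are an assumption chosen to keep the arithmetic obvious.

#include <stdio.h>
#include <stddef.h>

/*
 * Hypothetical stand-in for jemalloc's s2u(): round a request up to
 * the next power-of-two size class. Real size classes are finer
 * grained; powers of two keep the example small.
 */
static size_t
toy_s2u(size_t size)
{
	size_t class = 4096; /* Smallest class: one 4 KiB page. */

	while (class < size)
		class <<= 1;
	return (class);
}

int
main(void)
{
	size_t usize = 65536; /* Request exactly matching a size class. */
	size_t pad = 4096;    /* Cache-oblivious padding: one page. */

	/*
	 * Pre-fix order: pad first, then classify. The pad pushes the
	 * request past the 64 KiB class, so the extent search starts at
	 * 128 KiB and skips recycled extents of the optimal class.
	 */
	size_t broken = toy_s2u(usize + pad);

	/*
	 * Post-fix order: classify the unpadded size, then add the pad,
	 * keeping the search anchored at the optimal size class.
	 */
	size_t fixed = toy_s2u(usize) + pad;

	printf("pad-then-classify: %zu\n", broken); /* 131072 */
	printf("classify-then-pad: %zu\n", fixed);  /* 69632 */
	return (0);
}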
parent ea9961acdb
commit 4a7852137d
src/extent.c (11 changed lines)
@@ -427,13 +427,12 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 		assert(prev == NULL || extent_past_get(prev) == new_addr);
 	}
 
-	size = usize + pad;
-	alloc_size = (new_addr != NULL) ? size : s2u(size +
-	    PAGE_CEILING(alignment) - PAGE);
-	if (alloc_size > LARGE_MAXCLASS + pad || alloc_size < usize) {
-		/* Too large, possibly wrapped around. */
+	alloc_size = ((new_addr != NULL) ? usize : s2u(usize +
+	    PAGE_CEILING(alignment) - PAGE)) + pad;
+	/* Beware size_t wrap-around. */
+	if (alloc_size < usize)
 		return (NULL);
-	}
+	size = usize + pad;
 	if (!locked)
 		malloc_mutex_lock(tsdn, &arena->extents_mtx);
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
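The replacement check relies on unsigned wrap-around: since alloc_size is computed by adding pad (and possibly alignment slop) to a value derived from usize, an overflowed sum wraps modulo SIZE_MAX + 1 and ends up smaller than usize. A minimal sketch of that idiom, independent of jemalloc (checked_add is an illustrative helper, not part of the codebase):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/*
 * Overflow-checked addition using the same idiom as the patched code:
 * unsigned addition wraps on overflow, so a wrapped sum is strictly
 * smaller than either operand.
 */
static int
checked_add(size_t a, size_t b, size_t *result)
{
	size_t sum = a + b;

	if (sum < a)
		return (1); /* Wrapped around; reject, as the diff returns NULL. */
	*result = sum;
	return (0);
}

int
main(void)
{
	size_t out;

	printf("%d\n", checked_add(65536, 4096, &out));        /* 0: fits */
	printf("%d\n", checked_add(SIZE_MAX - 1, 4096, &out)); /* 1: wraps */
	return (0);
}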