Fix/simplify chunk_recycle() allocation size computations.

Remove the outer CHUNK_CEILING(s2u(...)) from the alloc_size
computation, since s2u() may overflow (and return 0); CHUNK_CEILING()
is only needed around the alignment portion of the computation.
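
For concreteness, a minimal self-contained sketch of the old and new
computations. CHUNKSIZE, CHUNK_CEILING(), HUGE_MAXCLASS, and
s2u_model() below are assumed stand-ins for the real jemalloc
definitions, with s2u_model() returning 0 for requests beyond the
largest size class, as described above:

#include <stdio.h>
#include <stddef.h>

#define CHUNKSIZE ((size_t)1 << 21)	/* assumed 2 MiB chunks */
#define CHUNK_CEILING(s) \
	(((s) + (CHUNKSIZE - 1)) & ~(CHUNKSIZE - 1))
#define HUGE_MAXCLASS ((size_t)1 << 62)	/* placeholder largest size class */

/* Toy model of s2u(): map a request to its size class; return 0 when
 * the request exceeds the largest class (the overflow described above). */
static size_t
s2u_model(size_t s)
{
	return ((s > HUGE_MAXCLASS) ? 0 : CHUNK_CEILING(s));
}

int
main(void)
{
	size_t size = HUGE_MAXCLASS;	/* chunk-aligned by construction */
	size_t alignment = 2 * CHUNKSIZE;

	/* Old: the intermediate sum exceeds HUGE_MAXCLASS, so s2u()
	 * returns 0 and alloc_size collapses to 0. */
	size_t old_alloc =
	    CHUNK_CEILING(s2u_model(size + alignment - CHUNKSIZE));

	/* New: size is already chunk-aligned (per the new asserts), so
	 * only the alignment term needs rounding; no s2u() call at all. */
	size_t new_alloc = size + CHUNK_CEILING(alignment) - CHUNKSIZE;

	printf("old alloc_size = %zu, new alloc_size = %zu\n",
	    old_alloc, new_alloc);
	return (0);
}

Under these assumptions the old expression prints 0 while the new one
stays exact, which is why the fix also asserts that size is already
chunk-aligned.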

This fixes a regression caused by
5707d6f952 (Quantize szad trees by size
class.) and first released in 4.0.0.

This resolves #497.
Author: Jason Evans
Date:   2016-11-11 21:58:05 -08:00
Parent: 2cdf07aba9
Commit: b9408d77a6

@@ -209,7 +209,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	size_t alloc_size, leadsize, trailsize;
 	bool zeroed, committed;
 
+	assert(CHUNK_CEILING(size) == size);
+	assert(alignment > 0);
 	assert(new_addr == NULL || alignment == chunksize);
+	assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
 	/*
 	 * Cached chunks use the node linkage embedded in their headers, in
 	 * which case dalloc_node is true, and new_addr is non-NULL because
@@ -217,7 +220,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	 */
 	assert(dalloc_node || new_addr != NULL);
 
-	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+	alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
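
A note on the retained guard (my observation, not part of this commit):
even the new expression can wrap for a pathological alignment, and the
"Beware size_t wrap-around" check is what rejects that case. A minimal
sketch under the same assumed CHUNKSIZE/CHUNK_CEILING() stand-ins as
above, assuming 64-bit size_t:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CHUNKSIZE ((size_t)1 << 21)	/* assumed 2 MiB chunks */
#define CHUNK_CEILING(s) \
	(((s) + (CHUNKSIZE - 1)) & ~(CHUNKSIZE - 1))

int
main(void)
{
	size_t size = CHUNKSIZE;	/* one chunk, already aligned */
	size_t alignment = SIZE_MAX - ((size_t)1 << 20);	/* pathological */

	/* CHUNK_CEILING(alignment) wraps around to 0, so the sum falls
	 * below size and the guard above would return NULL. */
	size_t alloc_size = size + CHUNK_CEILING(alignment) - CHUNKSIZE;
	assert(alloc_size < size);
	return (0);
}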