From 560a4e1e01d3733c2f107cdb3cc3580f3ed84442 Mon Sep 17 00:00:00 2001
From: Jason Evans <jasone@canonware.com>
Date: Fri, 11 Sep 2015 16:18:53 -0700
Subject: [PATCH] Fix xallocx() bugs.

Fix xallocx() bugs related to the 'extra' parameter when specified as
non-zero.
---
 ChangeLog                                     |   2 +
 include/jemalloc/internal/arena.h             |   2 +-
 include/jemalloc/internal/huge.h              |   7 +-
 .../jemalloc/internal/jemalloc_internal.h.in  |   2 +-
 include/jemalloc/internal/size_classes.sh     |   5 +
 src/arena.c                                   | 210 +++++++--------
 src/huge.c                                    | 111 ++++----
 test/integration/rallocx.c                    |   2 +-
 test/integration/xallocx.c                    | 242 +++++++++++++++++-
 9 files changed, 399 insertions(+), 184 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 63c9d56a..18d72ebd 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -26,6 +26,8 @@ brevity.  Much more detail can be found in the git revision history:
     with interposed resets (triggered via the "prof.reset" mallctl).  This bug
     could cause data structure corruption that would most likely result in a
     segfault.
+  - Fix xallocx() bugs related to the 'extra' parameter when specified as
+    non-zero.
 
 * 4.0.0 (August 17, 2015)
 
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 4c1a471a..f77f2574 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -488,7 +488,7 @@ extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
 bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
     size_t extra, bool zero);
 void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero, tcache_t *tcache);
+    size_t size, size_t alignment, bool zero, tcache_t *tcache);
 dss_prec_t arena_dss_prec_get(arena_t *arena);
 bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
 ssize_t arena_lg_dirty_mult_default_get(void);
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index 8b6c6cec..328eeed7 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -13,11 +13,10 @@ void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
     tcache_t *tcache);
 void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
     bool zero, tcache_t *tcache);
-bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra, bool zero);
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero);
 void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
-    size_t size, size_t extra, size_t alignment, bool zero,
-    tcache_t *tcache);
+    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
 #ifdef JEMALLOC_JET
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index f6e464e9..a341b253 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1096,7 +1096,7 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
 		    zero, tcache, arena));
 	}
 
-	return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
+	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
 	    tcache));
 }
 
diff --git a/include/jemalloc/internal/size_classes.sh b/include/jemalloc/internal/size_classes.sh
index 1c2d6816..fc82036d 100755
--- a/include/jemalloc/internal/size_classes.sh
+++ b/include/jemalloc/internal/size_classes.sh
@@ -167,6 +167,8 @@ size_classes() {
         lg_large_minclass=$((${lg_grp} + 2))
       fi
     fi
+    # Final written value is correct:
+    huge_maxclass="((((size_t)1) << ${lg_grp}) + (((size_t)${ndelta}) << ${lg_delta}))"
     index=$((${index} + 1))
     ndelta=$((${ndelta} + 1))
   done
@@ -185,6 +187,7 @@ size_classes() {
   # - lookup_maxclass
   # - small_maxclass
   # - lg_large_minclass
+  # - huge_maxclass
 }
 
 cat <<EOF
@@ ... @@ cat <<EOF
  * LG_LARGE_MINCLASS: Lg of minimum large size class.
+ * HUGE_MAXCLASS: Maximum (huge) size class.
  */
@@ ... @@ cat <<EOF
   echo "#define LG_LARGE_MINCLASS ${lg_large_minclass}"
+  echo "#define HUGE_MAXCLASS     ${huge_maxclass}"
   echo "#endif"
diff --git a/src/arena.c b/src/arena.c
--- a/src/arena.c
+++ b/src/arena.c
@@ ... @@ static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t oldsize, size_t size, size_t extra, bool zero)
+    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	size_t npages = (oldsize + large_pad) >> LG_PAGE;
 	size_t followsize;
-	size_t usize_min = s2u(size);
 
 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
 	    large_pad);
 
 	/* Try to extend the run. */
-	assert(usize_min > oldsize);
 	malloc_mutex_lock(&arena->lock);
-	if (pageind+npages < chunk_npages &&
-	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
-	    (followsize = arena_mapbits_unallocated_size_get(chunk,
-	    pageind+npages)) >= usize_min - oldsize) {
+	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+	    pageind+npages) != 0)
+		goto label_fail;
+	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+	if (oldsize + followsize >= usize_min) {
 		/*
 		 * The next run is available and sufficiently large.  Split the
 		 * following run, then merge the first part with the existing
 		 * allocation.
 		 */
 		arena_run_t *run;
-		size_t flag_dirty, flag_unzeroed_mask, splitsize, usize;
+		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
 
-		usize = s2u(size + extra);
+		usize = usize_max;
 		while (oldsize + followsize < usize)
 			usize = index2size(size2index(usize)-1);
 		assert(usize >= usize_min);
+		assert(usize >= oldsize);
 		splitsize = usize - oldsize;
+		if (splitsize == 0)
+			goto label_fail;
 
 		run = &arena_miscelm_get(chunk, pageind+npages)->run;
-		if (arena_run_split_large(arena, run, splitsize, zero)) {
-			malloc_mutex_unlock(&arena->lock);
-			return (true);
-		}
+		if (arena_run_split_large(arena, run, splitsize, zero))
+			goto label_fail;
 
 		size = oldsize + splitsize;
 		npages = (size + large_pad) >> LG_PAGE;
@@ -2719,8 +2719,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		malloc_mutex_unlock(&arena->lock);
 		return (false);
 	}
+label_fail:
 	malloc_mutex_unlock(&arena->lock);
-
 	return (true);
 }
 
@@ -2749,98 +2749,114 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
  * always fail if growing an object, and the following run is already in use.
  */
 static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-	size_t usize;
+	arena_chunk_t *chunk;
+	arena_t *arena;
 
-	/* Make sure extra can't cause size_t overflow. */
-	if (unlikely(extra >= arena_maxclass))
-		return (true);
-
-	usize = s2u(size + extra);
-	if (usize == oldsize) {
-		/* Same size class. */
+	if (oldsize == usize_max) {
+		/* Current size class is compatible and maximal. */
 		return (false);
-	} else {
-		arena_chunk_t *chunk;
-		arena_t *arena;
-
-		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-		arena = extent_node_arena_get(&chunk->node);
-
-		if (usize < oldsize) {
-			/* Fill before shrinking in order avoid a race. */
-			arena_ralloc_junk_large(ptr, oldsize, usize);
-			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
-			    usize);
-			return (false);
-		} else {
-			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
-			    oldsize, size, extra, zero);
-			if (config_fill && !ret && !zero) {
-				if (unlikely(opt_junk_alloc)) {
-					memset((void *)((uintptr_t)ptr +
-					    oldsize), 0xa5, isalloc(ptr,
-					    config_prof) - oldsize);
-				} else if (unlikely(opt_zero)) {
-					memset((void *)((uintptr_t)ptr +
-					    oldsize), 0, isalloc(ptr,
-					    config_prof) - oldsize);
-				}
-			}
-			return (ret);
-		}
 	}
+
+	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+	arena = extent_node_arena_get(&chunk->node);
+
+	if (oldsize < usize_max) {
+		bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+		    usize_min, usize_max, zero);
+		if (config_fill && !ret && !zero) {
+			if (unlikely(opt_junk_alloc)) {
+				memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+				    isalloc(ptr, config_prof) - oldsize);
+			} else if (unlikely(opt_zero)) {
+				memset((void *)((uintptr_t)ptr + oldsize), 0,
+				    isalloc(ptr, config_prof) - oldsize);
+			}
+		}
+		return (ret);
+	}
+
+	assert(oldsize > usize_max);
+	/* Fill before shrinking in order to avoid a race. */
+	arena_ralloc_junk_large(ptr, oldsize, usize_max);
+	arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+	return (false);
 }
 
 bool
 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
     bool zero)
 {
+	size_t usize_min, usize_max;
 
-	if (likely(size <= arena_maxclass)) {
+	/* Check for size overflow. */
+	if (unlikely(size > HUGE_MAXCLASS))
+		return (true);
+	usize_min = s2u(size);
+	/* Clamp extra if necessary to avoid (size + extra) overflow. */
+	if (unlikely(size + extra > HUGE_MAXCLASS))
+		extra = HUGE_MAXCLASS - size;
+	usize_max = s2u(size + extra);
+
+	if (likely(oldsize <= arena_maxclass && usize_min <= arena_maxclass)) {
 		/*
 		 * Avoid moving the allocation if the size class can be left the
 		 * same.
 		 */
-		if (likely(oldsize <= arena_maxclass)) {
-			if (oldsize <= SMALL_MAXCLASS) {
-				assert(
-				    arena_bin_info[size2index(oldsize)].reg_size
-				    == oldsize);
-				if ((size + extra <= SMALL_MAXCLASS &&
-				    size2index(size + extra) ==
-				    size2index(oldsize)) || (size <= oldsize &&
-				    size + extra >= oldsize))
+		if (oldsize <= SMALL_MAXCLASS) {
+			assert(arena_bin_info[size2index(oldsize)].reg_size ==
+			    oldsize);
+			if ((usize_max <= SMALL_MAXCLASS &&
+			    size2index(usize_max) == size2index(oldsize)) ||
+			    (size <= oldsize && usize_max >= oldsize))
+				return (false);
+		} else {
+			if (usize_max > SMALL_MAXCLASS) {
+				if (!arena_ralloc_large(ptr, oldsize, usize_min,
+				    usize_max, zero))
 					return (false);
-			} else {
-				assert(size <= arena_maxclass);
-				if (size + extra > SMALL_MAXCLASS) {
-					if (!arena_ralloc_large(ptr, oldsize,
-					    size, extra, zero))
-						return (false);
-				}
 			}
 		}
 
 		/* Reallocation would require a move. */
 		return (true);
-	} else
-		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	} else {
+		return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+		    zero));
+	}
+}
+
+static void *
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
+
+	if (alignment == 0)
+		return (arena_malloc(tsd, arena, usize, zero, tcache));
+	usize = sa2u(usize, alignment);
+	if (usize == 0)
+		return (NULL);
+	return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 }
 
 void *
 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+    size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
+	size_t usize;
 
-	if (likely(size <= arena_maxclass)) {
+	usize = s2u(size);
+	if (usize == 0)
+		return (NULL);
+
+	if (likely(usize <= arena_maxclass)) {
 		size_t copysize;
 
 		/* Try to avoid moving the allocation. */
-		if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+		if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
 			return (ptr);
 
 		/*
@@ -2848,53 +2864,23 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
 		 * the object.  In that case, fall back to allocating new space
 		 * and copying.
 		 */
-		if (alignment != 0) {
-			size_t usize = sa2u(size + extra, alignment);
-			if (usize == 0)
-				return (NULL);
-			ret = ipalloct(tsd, usize, alignment, zero, tcache,
-			    arena);
-		} else {
-			ret = arena_malloc(tsd, arena, size + extra, zero,
-			    tcache);
-		}
-
-		if (ret == NULL) {
-			if (extra == 0)
-				return (NULL);
-			/* Try again, this time without extra. */
-			if (alignment != 0) {
-				size_t usize = sa2u(size, alignment);
-				if (usize == 0)
-					return (NULL);
-				ret = ipalloct(tsd, usize, alignment, zero,
-				    tcache, arena);
-			} else {
-				ret = arena_malloc(tsd, arena, size, zero,
-				    tcache);
-			}
-
-			if (ret == NULL)
-				return (NULL);
-		}
+		ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+		    zero, tcache);
+		if (ret == NULL)
+			return (NULL);
 
 		/*
 		 * Junk/zero-filling were already done by
 		 * ipalloc()/arena_malloc().
 		 */
 
-		/*
-		 * Copy at most size bytes (not size+extra), since the caller
-		 * has no expectation that the extra bytes will be reliably
-		 * preserved.
-		 */
-		copysize = (size < oldsize) ? size : oldsize;
+		copysize = (usize < oldsize) ? usize : oldsize;
 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
 		memcpy(ret, ptr, copysize);
 		isqalloc(tsd, ptr, oldsize, tcache);
 	} else {
-		ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
-		    alignment, zero, tcache);
+		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+		    zero, tcache);
 	}
 	return (ret);
 }
diff --git a/src/huge.c b/src/huge.c
index 4d5887c4..187bdaa9 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -126,18 +126,19 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
 static void
-huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize,
-    size_t size, size_t extra, bool zero)
+huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-	size_t usize_next;
+	size_t usize, usize_next;
 	extent_node_t *node;
 	arena_t *arena;
 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 	bool zeroed;
 
 	/* Increase usize to incorporate extra. */
-	while (usize < s2u(size+extra) && (usize_next = s2u(usize+1)) < oldsize)
-		usize = usize_next;
+	for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+	    <= oldsize; usize = usize_next)
+		; /* Do nothing. */
 
 	if (oldsize == usize)
 		return;
 
@@ -195,6 +196,8 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 	arena = extent_node_arena_get(node);
 	chunk_hooks = chunk_hooks_get(arena);
 
+	assert(oldsize > usize);
+
 	/* Split excess chunks. */
 	cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
 	if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
@@ -230,18 +233,11 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
 }
 
 static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
-	size_t usize;
+huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
 	extent_node_t *node;
 	arena_t *arena;
 	bool is_zeroed_subchunk, is_zeroed_chunk;
 
-	usize = s2u(size);
-	if (usize == 0) {
-		/* size_t overflow. */
-		return (true);
-	}
-
 	node = huge_node_get(ptr);
 	arena = extent_node_arena_get(node);
 	malloc_mutex_lock(&arena->huge_mtx);
@@ -282,89 +278,76 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t size, bool zero) {
 }
 
 bool
-huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+huge_ralloc_no_move(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-	size_t usize;
-
-	/* Both allocations must be huge to avoid a move. */
-	if (oldsize < chunksize)
-		return (true);
 
 	assert(s2u(oldsize) == oldsize);
-	usize = s2u(size);
-	if (usize == 0) {
-		/* size_t overflow. */
+
+	/* Both allocations must be huge to avoid a move. */
+	if (oldsize < chunksize || usize_max < chunksize)
 		return (true);
+
+	if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+		/* Attempt to expand the allocation in-place. */
+		if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max, zero))
+			return (false);
+		/* Try again, this time with usize_min. */
+		if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+		    CHUNK_CEILING(oldsize) && !huge_ralloc_no_move_expand(ptr,
+		    oldsize, usize_min, zero))
+			return (false);
 	}
 
 	/*
 	 * Avoid moving the allocation if the existing chunk size accommodates
 	 * the new size.
 	 */
-	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)
-	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(s2u(size+extra))) {
-		huge_ralloc_no_move_similar(ptr, oldsize, usize, size, extra,
+	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+		huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
 		    zero);
 		return (false);
 	}
 
 	/* Attempt to shrink the allocation in-place. */
-	if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize))
-		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize));
+	if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max))
+		return (huge_ralloc_no_move_shrink(ptr, oldsize, usize_max));
+	return (true);
+}
 
-	/* Attempt to expand the allocation in-place. */
-	if (huge_ralloc_no_move_expand(ptr, oldsize, size + extra, zero)) {
-		if (extra == 0)
-			return (true);
+static void *
+huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
 
-		/* Try again, this time without extra. */
-		return (huge_ralloc_no_move_expand(ptr, oldsize, size, zero));
-	}
-	return (false);
+	if (alignment <= chunksize)
+		return (huge_malloc(tsd, arena, usize, zero, tcache));
+	return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
 }
 
 void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
 {
 	void *ret;
 	size_t copysize;
 
 	/* Try to avoid moving the allocation. */
-	if (!huge_ralloc_no_move(ptr, oldsize, size, extra, zero))
+	if (!huge_ralloc_no_move(ptr, oldsize, usize, usize, zero))
 		return (ptr);
 
 	/*
-	 * size and oldsize are different enough that we need to use a
+	 * usize and oldsize are different enough that we need to use a
 	 * different size class.  In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	if (alignment > chunksize) {
-		ret = huge_palloc(tsd, arena, size + extra, alignment, zero,
-		    tcache);
-	} else
-		ret = huge_malloc(tsd, arena, size + extra, zero, tcache);
+	ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
+	    tcache);
+	if (ret == NULL)
+		return (NULL);
 
-	if (ret == NULL) {
-		if (extra == 0)
-			return (NULL);
-		/* Try again, this time without extra. */
-		if (alignment > chunksize) {
-			ret = huge_palloc(tsd, arena, size, alignment, zero,
-			    tcache);
-		} else
-			ret = huge_malloc(tsd, arena, size, zero, tcache);
-
-		if (ret == NULL)
-			return (NULL);
-	}
-
-	/*
-	 * Copy at most size bytes (not size+extra), since the caller has no
-	 * expectation that the extra bytes will be reliably preserved.
-	 */
-	copysize = (size < oldsize) ? size : oldsize;
+	copysize = (usize < oldsize) ? usize : oldsize;
 	memcpy(ret, ptr, copysize);
 	isqalloc(tsd, ptr, oldsize, tcache);
 	return (ret);
diff --git a/test/integration/rallocx.c b/test/integration/rallocx.c
index 8b6cde31..be1b27b7 100644
--- a/test/integration/rallocx.c
+++ b/test/integration/rallocx.c
@@ -22,7 +22,7 @@ TEST_BEGIN(test_grow_and_shrink)
 			    szs[j-1], szs[j-1]+1);
 			szs[j] = sallocx(q, 0);
 			assert_zu_ne(szs[j], szs[j-1]+1,
-			    "Expected size to at least: %zu", szs[j-1]+1);
+			    "Expected size to be at least: %zu", szs[j-1]+1);
 			p = q;
 		}
 
diff --git a/test/integration/xallocx.c b/test/integration/xallocx.c
index ab4cf945..8f0de630 100644
--- a/test/integration/xallocx.c
+++ b/test/integration/xallocx.c
@@ -48,6 +48,243 @@ TEST_BEGIN(test_no_move_fail)
 }
 TEST_END
 
+static unsigned
+get_nsizes_impl(const char *cmd)
+{
+	unsigned ret;
+	size_t z;
+
+	z = sizeof(unsigned);
+	assert_d_eq(mallctl(cmd, &ret, &z, NULL, 0), 0,
+	    "Unexpected mallctl(\"%s\", ...) failure", cmd);
+
+	return (ret);
+}
+
+static unsigned
+get_nsmall(void)
+{
+
+	return (get_nsizes_impl("arenas.nbins"));
+}
+
+static unsigned
+get_nlarge(void)
+{
+
+	return (get_nsizes_impl("arenas.nlruns"));
+}
+
+static unsigned
+get_nhuge(void)
+{
+
+	return (get_nsizes_impl("arenas.nhchunks"));
+}
+
+static size_t
+get_size_impl(const char *cmd, size_t ind)
+{
+	size_t ret;
+	size_t z;
+	size_t mib[4];
+	size_t miblen = 4;
+
+	z = sizeof(size_t);
+	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
+	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
+	mib[2] = ind;
+	z = sizeof(size_t);
+	assert_d_eq(mallctlbymib(mib, miblen, &ret, &z, NULL, 0),
+	    0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind);
failure", cmd, ind); + + return (ret); +} + +static size_t +get_small_size(size_t ind) +{ + + return (get_size_impl("arenas.bin.0.size", ind)); +} + +static size_t +get_large_size(size_t ind) +{ + + return (get_size_impl("arenas.lrun.0.size", ind)); +} + +static size_t +get_huge_size(size_t ind) +{ + + return (get_size_impl("arenas.hchunk.0.size", ind)); +} + +TEST_BEGIN(test_extra_small) +{ + size_t small0, small1, hugemax; + void *p; + + /* Get size classes. */ + small0 = get_small_size(0); + small1 = get_small_size(1); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(small0, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small1, 0, 0), small0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, + "Unexpected xallocx() behavior"); + + /* Test size+extra overflow. */ + assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, + "Unexpected xallocx() behavior"); + + dallocx(p, 0); +} +TEST_END + +TEST_BEGIN(test_extra_large) +{ + size_t smallmax, large0, large1, large2, huge0, hugemax; + void *p; + + /* Get size classes. */ + smallmax = get_small_size(get_nsmall()-1); + large0 = get_large_size(0); + large1 = get_large_size(1); + large2 = get_large_size(2); + huge0 = get_huge_size(0); + hugemax = get_huge_size(get_nhuge()-1); + + p = mallocx(large2, 0); + assert_ptr_not_null(p, "Unexpected mallocx() error"); + + assert_zu_eq(xallocx(p, large2, 0, 0), large2, + "Unexpected xallocx() behavior"); + /* Test size decrease with zero extra. */ + assert_zu_eq(xallocx(p, large0, 0, 0), large0, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, smallmax, 0, 0), large0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large2, 0, 0), large2, + "Unexpected xallocx() behavior"); + /* Test size decrease with non-zero extra. */ + assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large1, large2 - large1, 0), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, large0, large1 - large0, 0), large1, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, 0), large0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, 0), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with zero extra. */ + assert_zu_eq(xallocx(p, large2, 0, 0), large2, + "Unexpected xallocx() behavior"); + assert_zu_eq(xallocx(p, huge0, 0, 0), large2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, 0), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_lt(xallocx(p, large0, huge0 - large0, 0), huge0, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large0, 0, 0), large0, + "Unexpected xallocx() behavior"); + /* Test size increase with non-zero extra. */ + assert_zu_eq(xallocx(p, large0, large2 - large0, 0), large2, + "Unexpected xallocx() behavior"); + + assert_zu_eq(xallocx(p, large2, 0, 0), large2, + "Unexpected xallocx() behavior"); + /* Test size+extra overflow. 
+	assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, 0), huge0,
+	    "Unexpected xallocx() behavior");
+
+	dallocx(p, 0);
+}
+TEST_END
+
+TEST_BEGIN(test_extra_huge)
+{
+	size_t largemax, huge0, huge1, huge2, hugemax;
+	void *p;
+
+	/* Get size classes. */
+	largemax = get_large_size(get_nlarge()-1);
+	huge0 = get_huge_size(0);
+	huge1 = get_huge_size(1);
+	huge2 = get_huge_size(2);
+	hugemax = get_huge_size(get_nhuge()-1);
+
+	p = mallocx(huge2, 0);
+	assert_ptr_not_null(p, "Unexpected mallocx() error");
+
+	assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+	/* Test size decrease with zero extra. */
+	assert_zu_eq(xallocx(p, huge0, 0, 0), huge0,
+	    "Unexpected xallocx() behavior");
+	assert_zu_eq(xallocx(p, largemax, 0, 0), huge0,
+	    "Unexpected xallocx() behavior");
+
+	assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+	/* Test size decrease with non-zero extra. */
+	assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+	assert_zu_eq(xallocx(p, huge1, huge2 - huge1, 0), huge2,
+	    "Unexpected xallocx() behavior");
+	assert_zu_eq(xallocx(p, huge0, huge1 - huge0, 0), huge1,
+	    "Unexpected xallocx() behavior");
+	assert_zu_eq(xallocx(p, largemax, huge0 - largemax, 0), huge0,
+	    "Unexpected xallocx() behavior");
+
+	assert_zu_eq(xallocx(p, huge0, 0, 0), huge0,
+	    "Unexpected xallocx() behavior");
+	/* Test size increase with zero extra. */
+	assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+	assert_zu_eq(xallocx(p, hugemax+1, 0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+
+	assert_zu_eq(xallocx(p, huge0, 0, 0), huge0,
+	    "Unexpected xallocx() behavior");
+	/* Test size increase with non-zero extra. */
+	assert_zu_le(xallocx(p, huge0, SIZE_T_MAX - huge0, 0), hugemax,
+	    "Unexpected xallocx() behavior");
+
+	assert_zu_eq(xallocx(p, huge0, 0, 0), huge0,
+	    "Unexpected xallocx() behavior");
+	/* Test size increase with non-zero extra. */
+	assert_zu_eq(xallocx(p, huge0, huge2 - huge0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+
+	assert_zu_eq(xallocx(p, huge2, 0, 0), huge2,
+	    "Unexpected xallocx() behavior");
+	/* Test size+extra overflow. */
+	assert_zu_le(xallocx(p, huge2, hugemax - huge2 + 1, 0), hugemax,
+	    "Unexpected xallocx() behavior");
+
+	dallocx(p, 0);
+}
+TEST_END
+
 int
 main(void)
 {
@@ -55,5 +292,8 @@ main(void)
 	return (test(
 	    test_same_size,
 	    test_extra_no_move,
-	    test_no_move_fail));
+	    test_no_move_fail,
+	    test_extra_small,
+	    test_extra_large,
+	    test_extra_huge));
 }
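
Usage note (not part of the patch): the tests above pin down the xallocx() contract that this change repairs. A minimal sketch of that contract follows, assuming jemalloc's non-standard API from <jemalloc/jemalloc.h> (4.x era, matching this patch); the 4096/8192 byte counts are arbitrary illustration values, not sizes used by the patch.

	#include <assert.h>
	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		size_t usize;
		void *p = mallocx(4096, 0);	/* Real size is >= 4096. */

		if (p == NULL)
			return (1);

		/*
		 * Resize in place to at least 4096 bytes, opportunistically up
		 * to 4096+8192 bytes.  xallocx() never moves p; it returns the
		 * allocation's resulting real size whether or not the
		 * requested resize could be satisfied, so the caller compares
		 * the result against the requested size to detect failure.
		 */
		usize = xallocx(p, 4096, 8192, 0);
		assert(usize == sallocx(p, 0));
		printf("real size after xallocx(): %zu\n", usize);

		dallocx(p, 0);
		return (0);
	}

With non-zero 'extra', success only guarantees a result of at least 'size'; the extra bytes are best-effort. That is why arena_ralloc_no_move() above clamps size + extra to HUGE_MAXCLASS instead of failing, and why the overflow tests accept the unchanged old size as a valid result.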