diff --git a/src/huge.c b/src/huge.c
index cecaf2df..6d86aed8 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -171,6 +171,16 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 				abort();
 			memcpy(ret, ptr, copysize);
 			chunk_dealloc_mmap(ptr, oldsize);
+		} else if (config_fill && zero == false && opt_junk && oldsize
+		    < newsize) {
+			/*
+			 * mremap(2) clobbers the original mapping, so
+			 * junk/zero filling is not preserved.  There is no
+			 * need to zero fill here, since any trailing
+			 * uninitialized memory is demand-zeroed by the
+			 * kernel, but junk filling must be redone.
+			 */
+			memset(ret + oldsize, 0xa5, newsize - oldsize);
 		}
 	} else
 #endif
diff --git a/test/unit/junk.c b/test/unit/junk.c
index e27db2fe..ef8f9c16 100644
--- a/test/unit/junk.c
+++ b/test/unit/junk.c
@@ -92,9 +92,12 @@ test_junk(size_t sz_min, size_t sz_max)
 			s = (char *)rallocx(s, sz+1, 0);
 			assert_ptr_not_null((void *)s,
 			    "Unexpected rallocx() failure");
-			assert_ptr_eq(most_recently_junked, junked,
-			    "Expected region of size %zu to be junk-filled",
-			    sz);
+			if (!config_mremap || sz+1 <= arena_maxclass) {
+				assert_ptr_eq(most_recently_junked, junked,
+				    "Expected region of size %zu to be "
+				    "junk-filled",
+				    sz);
+			}
 		}
 	}
 
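
For context, here is a minimal standalone sketch (not part of the patch) of the behavior the src/huge.c hunk restores: when a huge allocation grows in place, the kernel demand-zeroes the newly mapped tail, so zero filling comes for free, but the 0xa5 junk pattern still has to be written over that tail by hand. The sketch uses plain realloc() as a stand-in for the mremap(2)-based growth path, and grow_and_rejunk() is a hypothetical helper, not a jemalloc API.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define JUNK_BYTE 0xa5

/*
 * Hypothetical helper: grow a junk-filled buffer and re-junk only the newly
 * exposed tail, mirroring the memset() added to huge_ralloc() above.
 */
static void *
grow_and_rejunk(void *ptr, size_t oldsize, size_t newsize)
{
	unsigned char *ret = realloc(ptr, newsize);

	if (ret == NULL)
		return (NULL);
	if (newsize > oldsize)
		memset(ret + oldsize, JUNK_BYTE, newsize - oldsize);
	return (ret);
}

int
main(void)
{
	size_t i, oldsize = 64, newsize = 256;
	unsigned char *p = malloc(oldsize);

	assert(p != NULL);
	memset(p, JUNK_BYTE, oldsize);		/* initial junk fill */
	p = grow_and_rejunk(p, oldsize, newsize);
	assert(p != NULL);
	for (i = 0; i < newsize; i++)
		assert(p[i] == JUNK_BYTE);	/* old and new bytes both junked */
	free(p);
	return (0);
}

The test/unit/junk.c hunk, in turn, skips the junk-fill assertion when mremap support is compiled in and the reallocation has grown beyond arena_maxclass, presumably because the old region is remapped rather than junk-filled on deallocation in that case, so the test's most_recently_junked tracking never fires for it.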