Fix junk filling for mremap(2)-based huge reallocation.

If mremap(2) is used for huge reallocation, physical pages are mapped to
new virtual addresses rather than data being copied to new pages.  This
bypasses the normal junk filling that would happen during allocation, so
add junk filling that is specific to this case.
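
For context (not part of the commit), here is a minimal standalone sketch of why the fix is needed, assuming Linux/glibc; grow_junked and JUNK_BYTE are hypothetical names, and jemalloc's actual mremap path is more involved. Growing a mapping with mremap(2) moves the existing physical pages, so bytes junk-filled at allocation time travel with the data, while the newly exposed tail arrives demand-zeroed and never sees the junk pattern unless it is rewritten explicitly, mirroring the memset this commit adds:

    /* Hypothetical sketch, not jemalloc code. */
    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define JUNK_BYTE 0xa5	/* jemalloc's allocation junk pattern */

    static void *
    grow_junked(void *ptr, size_t oldsize, size_t newsize)
    {
    	void *ret = mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE);

    	if (ret == MAP_FAILED)
    		return (NULL);
    	/*
    	 * The old pages moved with the mapping, junk bytes included.
    	 * The tail is demand-zeroed by the kernel, so zero filling is
    	 * already covered; only junk filling must be redone.
    	 */
    	assert(((unsigned char *)ret)[newsize - 1] == 0x00);
    	memset((char *)ret + oldsize, JUNK_BYTE, newsize - oldsize);
    	return (ret);
    }

    int
    main(void)
    {
    	size_t oldsize = 4096, newsize = 8192;
    	void *p = mmap(NULL, oldsize, PROT_READ | PROT_WRITE,
    	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    	assert(p != MAP_FAILED);
    	memset(p, JUNK_BYTE, oldsize);	/* junk fill at allocation */
    	p = grow_junked(p, oldsize, newsize);
    	assert(p != NULL);
    	printf("tail: 0x%02x\n", ((unsigned char *)p)[newsize - 1]);
    	return (0);
    }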
Author: Jason Evans
Date:   2014-02-25 11:58:50 -08:00
Parent: cb657e3170
Commit: 940fdfd5ee

2 changed files with 16 additions and 3 deletions

src/huge.c

@@ -171,6 +171,16 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 				abort();
 			memcpy(ret, ptr, copysize);
 			chunk_dealloc_mmap(ptr, oldsize);
+		} else if (config_fill && zero == false && opt_junk && oldsize
+		    < newsize) {
+			/*
+			 * mremap(2) clobbers the original mapping, so
+			 * junk/zero filling is not preserved.  There is no
+			 * need to zero fill here, since any trailing
+			 * uninitialized memory is demand-zeroed by the
+			 * kernel, but junk filling must be redone.
+			 */
+			memset(ret + oldsize, 0xa5, newsize - oldsize);
 		}
 	} else
 #endif

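A note on the constant: 0xa5 is the pattern jemalloc writes into newly allocated bytes when opt_junk is enabled (freed bytes get 0x5a), which is why only the tail beyond oldsize needs the memset; the leading oldsize bytes were junk-filled when first allocated and moved intact with the remapping.
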
test/unit/junk.c

@@ -92,9 +92,12 @@ test_junk(size_t sz_min, size_t sz_max)
 		s = (char *)rallocx(s, sz+1, 0);
 		assert_ptr_not_null((void *)s,
 		    "Unexpected rallocx() failure");
-		assert_ptr_eq(most_recently_junked, junked,
-		    "Expected region of size %zu to be junk-filled",
-		    sz);
+		if (!config_mremap || sz+1 <= arena_maxclass) {
+			assert_ptr_eq(most_recently_junked, junked,
+			    "Expected region of size %zu to be "
+			    "junk-filled",
+			    sz);
+		}
 	}
 }
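
A note on the new guard: most_recently_junked is recorded by the test's junk hooks when a region is junk-filled on the deallocation side, along the lines of the hypothetical fragment below. When config_mremap is enabled and sz+1 exceeds arena_maxclass, the reallocation can take the mremap(2) path, where the old pages survive at the new address rather than being junked and freed, so the hook never fires and the pointer comparison would fail spuriously; the condition applies the assertion only when that path cannot be taken.

    #include <string.h>

    /* Hypothetical hook in the spirit of the test harness. */
    static void *most_recently_junked;

    static void
    dalloc_junk_intercept(void *ptr, size_t usize)
    {
    	memset(ptr, 0x5a, usize);	/* deallocation junk pattern */
    	most_recently_junked = ptr;
    }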