Fix chunk allocation/deallocation bugs.

Fix chunk_alloc_dss() to zero memory when requested.

Fix chunk_dealloc() to avoid chunk_dealloc_mmap() for dss-allocated
memory.

Fix huge_palloc() to always junk fill when requested.

Improve chunk_recycle() to report that memory is zeroed as a side effect
of pages_purge().
Jason Evans, 2012-04-21 16:04:51 -07:00
commit 7ad54c1c30, parent 8f0e0eb1c0
4 changed files with 26 additions and 7 deletions

include/jemalloc/internal/tsd.h

@@ -111,7 +111,7 @@ a_name##_tsd_cleanup_wrapper(void)	\
 	\
 	if (a_name##_initialized) {	\
 		a_name##_initialized = false;	\
 		a_cleanup(&a_name##_tls);	\
 	}	\
 	return (a_name##_initialized);	\
 }	\
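This wrapper is stamped out by jemalloc's malloc_tsd macro machinery, which generates thread-specific-data boilerplate per variable. Instantiated for a hypothetical TSD variable named foo with cleanup function foo_cleanup (names invented for illustration; only the macro body above is from the source), it expands to roughly:

	static bool foo_initialized = false;	/* a_name##_initialized */
	static foo_t foo_tls;			/* a_name##_tls; payload type assumed */

	bool
	foo_tsd_cleanup_wrapper(void)
	{
		if (foo_initialized) {
			foo_initialized = false;
			foo_cleanup(&foo_tls);	/* a_cleanup */
		}
		return (foo_initialized);
	}

The return value reports whether the slot is (still, or again) initialized after cleanup, which suggests the TSD layer re-runs cleanup passes while any wrapper reports that its slot was re-initialized by a destructor.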

src/chunk.c

@@ -98,7 +98,10 @@ chunk_recycle(size_t size, size_t alignment, bool *zero)
 	if (node != NULL)
 		base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_FREE
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+	/* Pages are zeroed as a side effect of pages_purge(). */
+	*zero = true;
+#else
 	if (*zero) {
 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 		memset(ret, 0, size);
 	}
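The flipped #ifdef rests on an OS guarantee rather than on jemalloc itself: madvise(..., MADV_DONTNEED) on a private anonymous mapping discards the backing pages, so the next read faults in fresh zero-filled pages, whereas MADV_FREE-style purging leaves the old contents in place until the kernel actually reclaims them. A minimal Linux-only demonstration of the property chunk_recycle() now reports (standalone sketch, not jemalloc code; error handling elided):

	#define _DEFAULT_SOURCE
	#include <assert.h>
	#include <string.h>
	#include <sys/mman.h>

	int
	main(void)
	{
		size_t size = 4096;
		char *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		memset(p, 0xa5, size);			/* dirty the page */
		madvise(p, size, MADV_DONTNEED);	/* discard backing page */
		assert(p[0] == 0);			/* next touch is a zero page */
		munmap(p, size);
		return (0);
	}

Under MADV_FREE, or when purging is unavailable, no such guarantee exists, which is why the #else branch keeps the explicit memset() for callers that requested zeroed memory.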
@@ -161,7 +164,13 @@ label_return:
 		if (config_prof && opt_prof && opt_prof_gdump && gdump)
 			prof_gdump();
 	}
+	if (config_debug && *zero && ret != NULL) {
+		size_t i;
+		size_t *p = (size_t *)(uintptr_t)ret;
+		for (i = 0; i < size / sizeof(size_t); i++)
+			assert(p[i] == 0);
+	}
 	assert(CHUNK_ADDR2BASE(ret) == ret);
 	return (ret);
 }
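The config_debug block added here audits that guarantee at runtime: whenever a chunk is reported as zeroed, every word is asserted to be zero, so a platform where the purge assumption is wrong trips an assert in debug builds instead of silently handing out dirty "zeroed" memory. Read as a free-standing helper (hypothetical name; chunk sizes are multiples of the page size, hence of sizeof(size_t), so no tail handling is needed):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Assert that the first `size` bytes at `ret` are zero, word by word. */
	static void
	assert_zeroed(const void *ret, size_t size)
	{
		const size_t *p = (const size_t *)(uintptr_t)ret;
		size_t i;

		for (i = 0; i < size / sizeof(size_t); i++)
			assert(p[i] == 0);
	}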
@@ -258,9 +267,9 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
 	}

 	if (unmap) {
-		if (chunk_dealloc_mmap(chunk, size) == false)
-			return;
-		chunk_record(chunk, size);
+		if ((config_dss && chunk_in_dss(chunk)) ||
+		    chunk_dealloc_mmap(chunk, size))
+			chunk_record(chunk, size);
 	}
 }
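The rewritten condition is a dispatch rule that leans on short-circuit evaluation: dss (sbrk) memory can never legally be passed to munmap(), so dss chunks always go back on the recycle tree, and mmap chunks are recorded only when chunk_dealloc_mmap() returns true, i.e. declines or fails to unmap. The old code called chunk_dealloc_mmap() unconditionally even for dss chunks, which is exactly the bug named in the commit message. An annotated restatement of the new logic (comments added here, not in the source):

	if ((config_dss && chunk_in_dss(chunk)) ||	/* dss: munmap is never legal */
	    chunk_dealloc_mmap(chunk, size))		/* true: chunk was not unmapped */
		chunk_record(chunk, size);		/* keep the chunk for reuse */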

src/chunk_dss.c

@@ -89,6 +89,10 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
 			malloc_mutex_unlock(&dss_mtx);
 			if (cpad_size != 0)
 				chunk_dealloc(cpad, cpad_size, true);
+			if (*zero) {
+				VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+				memset(ret, 0, size);
+			}
 			return (ret);
 		}
 	} while (dss_prev != (void *)-1);
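chunk_alloc_dss() grows the heap with sbrk(). Pages created by extending the break start out zero-filled, but this path can also hand back space whose contents are unknown, and previously the caller's request in *zero was simply ignored here. The fix takes the conservative route and zeroes explicitly whenever asked, with VALGRIND_MAKE_MEM_UNDEFINED() keeping Valgrind's definedness tracking consistent before the memset(). The *zero parameter follows an in/out convention shared by all the chunk allocators; a caller-side sketch of the contract (chunk_alloc() and chunksize are real jemalloc identifiers, the surrounding snippet is illustrative):

	bool zero = false;	/* in: caller does not require zeroed memory */
	void *chunk = chunk_alloc(chunksize, chunksize, false, &zero);

	if (chunk != NULL && zero) {
		/* out: memory is zeroed anyway, e.g. fresh mmap()ed
		 * pages or pages purged with MADV_DONTNEED */
	}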

src/huge.c

@@ -28,6 +28,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	void *ret;
 	size_t csize;
 	extent_node_t *node;
+	bool is_zeroed;

 	/* Allocate one or more contiguous chunks for this request. */
@@ -42,7 +43,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (node == NULL)
 		return (NULL);

-	ret = chunk_alloc(csize, alignment, false, &zero);
+	/*
+	 * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+	 * it is possible to make correct junk/zero fill decisions below.
+	 */
+	is_zeroed = zero;
+	ret = chunk_alloc(csize, alignment, false, &is_zeroed);
 	if (ret == NULL) {
 		base_node_dealloc(node);
 		return (NULL);
@@ -64,7 +70,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (config_fill && zero == false) {
 		if (opt_junk)
 			memset(ret, 0xa5, csize);
-		else if (opt_zero)
+		else if (opt_zero && is_zeroed == false)
 			memset(ret, 0, csize);
 	}
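The huge_palloc() change is the caller-side half of that contract: passing &zero straight through let chunk_alloc() overwrite the user's request with its own "already zeroed" report, after which the fill logic could no longer tell what was asked for from what the chunk happened to contain, so junk filling was skipped whenever the chunk came back zeroed. Copying the request into is_zeroed keeps the two apart, and the opt_zero arm then also skips a redundant memset(). The pattern in isolation (condensed from the hunks above, old call shown for contrast):

	/* Before: one flag carried both request and report; the report won. */
	ret = chunk_alloc(csize, alignment, false, &zero);

	/* After: the request survives in `zero`; the report lands in a copy. */
	is_zeroed = zero;
	ret = chunk_alloc(csize, alignment, false, &is_zeroed);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);	/* always honored now */
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);		/* skip if already zeroed */
	}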