Merge branch 'dev'

Jason Evans 2010-05-11 18:24:19 -07:00
commit e13243eb63
6 changed files with 52 additions and 28 deletions


@@ -10,6 +10,7 @@
 #ifdef JEMALLOC_H_EXTERNS
 
 void	*chunk_alloc_mmap(size_t size);
+void	*chunk_alloc_mmap_noreserve(size_t size);
 void	chunk_dealloc_mmap(void *chunk, size_t size);
 
 #endif /* JEMALLOC_H_EXTERNS */


@@ -353,7 +353,7 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 #ifdef JEMALLOC_FILL
 	if (opt_junk)
-		memset(ptr, 0x5a, arena->bins[binind].reg_size);
+		memset(ptr, 0x5a, size);
 #endif
 
 	tbin = &tcache->tbins[binind];
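
The junk-fill fix above matters because tcache_dalloc_large() handles large allocations, which are not served from the arena's small bins, so arena->bins[binind].reg_size was the wrong byte count for the object being freed; the caller-supplied size covers exactly that object. A minimal sketch of the junk-fill idiom, with illustrative names (opt_junk_demo and junk_fill_on_dalloc are not jemalloc identifiers):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool opt_junk_demo = true;	/* stand-in for opt_junk */

/*
 * On deallocation, overwrite the freed object with 0x5a so that
 * use-after-free reads show up as recognizable garbage.  The fill
 * length must be the size of the object itself, not a size taken
 * from a small-bin table.
 */
static void
junk_fill_on_dalloc(void *ptr, size_t usable_size)
{
	if (opt_junk_demo)
		memset(ptr, 0x5a, usable_size);
}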


@@ -23,14 +23,15 @@ static
 /******************************************************************************/
 /* Function prototypes for non-inline static functions. */
 
-static void	*pages_map(void *addr, size_t size);
+static void	*pages_map(void *addr, size_t size, bool noreserve);
 static void	pages_unmap(void *addr, size_t size);
-static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned);
+static void	*chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve);
+static void	*chunk_alloc_mmap_internal(size_t size, bool noreserve);
 
 /******************************************************************************/
 
 static void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, bool noreserve)
 {
 	void *ret;
@@ -38,8 +39,12 @@ pages_map(void *addr, size_t size)
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
 	 * of existing mappings, and we only want to create new mappings.
 	 */
-	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
-	    -1, 0);
+	int flags = MAP_PRIVATE | MAP_ANON;
+#ifdef MAP_NORESERVE
+	if (noreserve)
+		flags |= MAP_NORESERVE;
+#endif
+	ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
 	assert(ret != NULL);
 
 	if (ret == MAP_FAILED)
@@ -83,7 +88,7 @@ pages_unmap(void *addr, size_t size)
 }
 
 static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned)
+chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
 {
 	void *ret;
 	size_t offset;
@@ -92,7 +97,7 @@ chunk_alloc_mmap_slow(size_t size, bool unaligned)
 	if (size + chunksize <= size)
 		return (NULL);
 
-	ret = pages_map(NULL, size + chunksize);
+	ret = pages_map(NULL, size + chunksize, noreserve);
 	if (ret == NULL)
 		return (NULL);
@@ -128,8 +133,8 @@ chunk_alloc_mmap_slow(size_t size, bool unaligned)
 	return (ret);
 }
 
-void *
-chunk_alloc_mmap(size_t size)
+static void *
+chunk_alloc_mmap_internal(size_t size, bool noreserve)
 {
 	void *ret;
@@ -164,7 +169,7 @@ chunk_alloc_mmap(size_t size)
 	if (mmap_unaligned == false) {
 		size_t offset;
 
-		ret = pages_map(NULL, size);
+		ret = pages_map(NULL, size, noreserve);
 		if (ret == NULL)
 			return (NULL);
@@ -173,13 +178,13 @@ chunk_alloc_mmap(size_t size)
 			mmap_unaligned = true;
 			/* Try to extend chunk boundary. */
 			if (pages_map((void *)((uintptr_t)ret + size),
-			    chunksize - offset) == NULL) {
+			    chunksize - offset, noreserve) == NULL) {
 				/*
 				 * Extension failed. Clean up, then revert to
 				 * the reliable-but-expensive method.
 				 */
 				pages_unmap(ret, size);
-				ret = chunk_alloc_mmap_slow(size, true);
+				ret = chunk_alloc_mmap_slow(size, true, noreserve);
 			} else {
 				/* Clean up unneeded leading space. */
 				pages_unmap(ret, chunksize - offset);
@@ -188,11 +193,23 @@ chunk_alloc_mmap(size_t size)
 			}
 		}
 	} else
-		ret = chunk_alloc_mmap_slow(size, false);
+		ret = chunk_alloc_mmap_slow(size, false, noreserve);
 
 	return (ret);
 }
 
+void *
+chunk_alloc_mmap(size_t size)
+{
+	return chunk_alloc_mmap_internal(size, false);
+}
+
+void *
+chunk_alloc_mmap_noreserve(size_t size)
+{
+	return chunk_alloc_mmap_internal(size, true);
+}
+
 void
 chunk_dealloc_mmap(void *chunk, size_t size)
 {
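
These changes thread a noreserve flag from the new public entry point down to mmap(), so callers that only need to reserve a range of virtual addresses can ask the kernel (where MAP_NORESERVE exists) not to reserve swap for it; the #ifdef keeps the code building on systems that lack the flag. A self-contained sketch of the same pattern, assuming a POSIX mmap() (demo_map_noreserve is an illustrative name, not part of jemalloc):

#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

/* Anonymous private mapping, optionally without swap reservation. */
static void *
demo_map_noreserve(size_t size, bool noreserve)
{
	int flags = MAP_PRIVATE | MAP_ANON;
	void *p;

#ifdef MAP_NORESERVE
	if (noreserve)
		flags |= MAP_NORESERVE;
#endif
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);
	return (p == MAP_FAILED ? NULL : p);
}

Unlike the pages_map() change above, this sketch folds the MAP_FAILED check into the return value rather than asserting on it.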


@@ -283,7 +283,7 @@ chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
 	 * Allocate a chunk-aligned region of anonymous memory, which will
 	 * be the final location for the memory-mapped files.
 	 */
-	vaddr = chunk_alloc_mmap(cumsize);
+	vaddr = chunk_alloc_mmap_noreserve(cumsize);
 	if (vaddr == NULL) {
 		ret = true;
 		goto RETURN;
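
chunk_swap_enable() only needs this range as a placeholder: the memory-mapped swap files are mapped over it afterwards, so committing swap for the anonymous mapping itself would be wasted, which is exactly the case chunk_alloc_mmap_noreserve() was added for. A hedged sketch of the reserve-then-overlay pattern (the demo_* helpers and the use of PROT_NONE are illustrative, not how chunk_swap.c is written):

#include <sys/mman.h>
#include <stddef.h>

/* Reserve a contiguous address range without committing memory. */
static void *
demo_reserve_range(size_t size)
{
	int flags = MAP_PRIVATE | MAP_ANON;

#ifdef MAP_NORESERVE
	flags |= MAP_NORESERVE;
#endif
	void *p = mmap(NULL, size, PROT_NONE, flags, -1, 0);
	return (p == MAP_FAILED ? NULL : p);
}

/* Replace part of the reservation with a shared file mapping. */
static void *
demo_overlay_file(void *addr, size_t size, int fd)
{
	void *p = mmap(addr, size, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_FIXED, fd, 0);
	return (p == MAP_FAILED ? NULL : p);
}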


@@ -775,7 +775,7 @@ MALLOC_OUT:
 #endif
 
 #ifndef NO_TLS
-	next_arena = 0;
+	next_arena = (narenas > 0) ? 1 : 0;
 #endif
 
 	/* Allocate and initialize arenas. */
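
next_arena drives round-robin assignment of arenas to threads, so seeding it with 1 instead of 0 presumably steers newly created threads away from arena 0, which the thread that ran malloc_init() already uses. A minimal sketch of that round-robin selection, with illustrative names and a lock assumed held by the caller:

static unsigned demo_next_arena = 1;	/* seeded as in the change above */
static unsigned demo_narenas = 4;	/* assumed arena count */

/* Caller is assumed to hold the lock protecting demo_next_arena. */
static unsigned
demo_choose_arena_index(void)
{
	unsigned ix = demo_next_arena;

	demo_next_arena = (demo_next_arena + 1) % demo_narenas;
	return (ix);
}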


@@ -623,13 +623,8 @@ static inline void
 prof_sample_accum_update(size_t size)
 {
 
-	if (opt_lg_prof_sample == 0) {
-		/*
-		 * Don't bother with sampling logic, since sampling interval is
-		 * 1.
-		 */
-		return;
-	}
+	/* Sampling logic is unnecessary if the interval is 1. */
+	assert(opt_lg_prof_sample != 0);
 
 	/* Take care to avoid integer overflow. */
 	if (size >= prof_sample_threshold - prof_sample_accum) {
@@ -647,11 +642,15 @@ prof_sample_accum_update(size_t size)
 void
 prof_malloc(const void *ptr, prof_thr_cnt_t *cnt)
 {
-	size_t size = isalloc(ptr);
+	size_t size;
 
 	assert(ptr != NULL);
 
-	prof_sample_accum_update(size);
+	if (opt_lg_prof_sample != 0) {
+		size = isalloc(ptr);
+		prof_sample_accum_update(size);
+	} else if ((uintptr_t)cnt > (uintptr_t)1U)
+		size = isalloc(ptr);
 
 	if ((uintptr_t)cnt > (uintptr_t)1U) {
 		prof_ctx_set(ptr, cnt->ctx);
@@ -679,11 +678,18 @@
 prof_realloc(const void *ptr, prof_thr_cnt_t *cnt, const void *old_ptr,
     size_t old_size, prof_ctx_t *old_ctx)
 {
-	size_t size = isalloc(ptr);
+	size_t size;
 	prof_thr_cnt_t *told_cnt;
 
-	if (ptr != NULL)
-		prof_sample_accum_update(size);
+	assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
+
+	if (ptr != NULL) {
+		if (opt_lg_prof_sample != 0) {
+			size = isalloc(ptr);
+			prof_sample_accum_update(size);
+		} else if ((uintptr_t)cnt > (uintptr_t)1U)
+			size = isalloc(ptr);
+	}
 
 	if ((uintptr_t)old_ctx > (uintptr_t)1U) {
 		told_cnt = prof_lookup(old_ctx->bt);
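
Before this change, prof_sample_accum_update() returned immediately when opt_lg_prof_sample == 0 (a sampling interval of 1), but prof_malloc() and prof_realloc() still paid for isalloc() on every call. The callers now make that check themselves: the accumulator is only touched when sampling is actually in effect, isalloc() is only called when the size is needed, and the assert records the new precondition. A hedged sketch of the underlying byte-accumulator sampling idea, with illustrative names and an assumed 512 KiB threshold:

#include <stdbool.h>
#include <stddef.h>

static size_t demo_sample_threshold = 512 * 1024;	/* assumed interval */
static size_t demo_sample_accum = 0;

/*
 * Accumulate allocated bytes and report when the running total crosses
 * the threshold.  With a one-byte threshold every allocation would be a
 * sample, so the accumulator is pure overhead, which is why the change
 * above lets callers skip it entirely in that case.
 */
static bool
demo_accum_update(size_t size)
{
	/* Written this way to avoid integer overflow in accum + size. */
	if (size >= demo_sample_threshold - demo_sample_accum) {
		demo_sample_accum = size -
		    (demo_sample_threshold - demo_sample_accum);
		demo_sample_accum %= demo_sample_threshold;
		return (true);	/* threshold crossed: take a sample */
	}
	demo_sample_accum += size;
	return (false);
}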