Fix huge_ralloc to maintain chunk statistics.

Fix huge_ralloc() to properly maintain chunk statistics when using
mremap(2).
This commit is contained in:
Jason Evans 2011-11-11 14:41:59 -08:00
parent 03bf7a7a26
commit 115704dcdb
4 changed files with 17 additions and 14 deletions

View File

@@ -50,7 +50,7 @@ extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
 void *chunk_alloc(size_t size, bool base, bool *zero);
-void chunk_dealloc(void *chunk, size_t size);
+void chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool chunk_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */

View File

@@ -569,7 +569,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
 			arena->ndirty -= spare->ndirty;
 		}
 		malloc_mutex_unlock(&arena->lock);
-		chunk_dealloc((void *)spare, chunksize);
+		chunk_dealloc((void *)spare, chunksize, true);
 		malloc_mutex_lock(&arena->lock);
 #ifdef JEMALLOC_STATS
 		arena->stats.mapped -= chunksize;

View File

@@ -70,7 +70,7 @@ RETURN:
 #ifdef JEMALLOC_IVSALLOC
 	if (base == false && ret != NULL) {
 		if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-			chunk_dealloc(ret, size);
+			chunk_dealloc(ret, size, true);
 			return (NULL);
 		}
 	}
@@ -108,7 +108,7 @@ RETURN:
 }
 
 void
-chunk_dealloc(void *chunk, size_t size)
+chunk_dealloc(void *chunk, size_t size, bool unmap)
 {
 
 	assert(chunk != NULL);
@@ -125,6 +125,7 @@ chunk_dealloc(void *chunk, size_t size)
 	malloc_mutex_unlock(&chunks_mtx);
 #endif
 
+	if (unmap) {
 #ifdef JEMALLOC_SWAP
 		if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
 			return;
@@ -134,6 +135,7 @@ chunk_dealloc(void *chunk, size_t size)
 			return;
 #endif
 		chunk_dealloc_mmap(chunk, size);
+	}
 }
 
 bool

View File

@@ -110,12 +110,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (offset == 0) {
 		/* Trim trailing space. */
 		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-		    - chunk_size);
+		    - chunk_size, true);
 	} else {
 		size_t trailsize;
 
 		/* Trim leading space. */
-		chunk_dealloc(ret, alignment - offset);
+		chunk_dealloc(ret, alignment - offset, true);
 
 		ret = (void *)((uintptr_t)ret + (alignment - offset));
@@ -124,7 +124,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 			/* Trim trailing space. */
 			assert(trailsize < alloc_size);
 			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-			    trailsize);
+			    trailsize, true);
 		}
 	}
@@ -260,7 +260,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 			if (opt_abort)
 				abort();
 			memcpy(ret, ptr, copysize);
-			chunk_dealloc(ptr, oldsize);
+			chunk_dealloc_mmap(ptr, oldsize);
 		}
 	} else
 #endif
@@ -301,9 +301,10 @@ huge_dalloc(void *ptr, bool unmap)
 		memset(node->addr, 0x5a, node->size);
 #endif
 #endif
-		chunk_dealloc(node->addr, node->size);
 	}
 
+	chunk_dealloc(node->addr, node->size, unmap);
+
 	base_node_dealloc(node);
 }