Fix huge_ralloc to maintain chunk statistics.

Fix huge_ralloc() to properly maintain chunk statistics when using
mremap(2).
Jason Evans 2011-11-11 14:41:59 -08:00
parent fa351d9fdc
commit 12a4887826
4 changed files with 17 additions and 14 deletions
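
The core of the change is the new bool unmap parameter threaded through chunk_dealloc(): chunk statistics are now maintained unconditionally, while the actual release of the mapping is skipped when the caller has already disposed of the pages (as huge_ralloc() does by handing them to mremap(2)). Below is a simplified sketch of the reworked function, not a verbatim copy of the patched source: the rtree cleanup is omitted and the statistics field names are assumed to match the jemalloc 2.x sources.

/*
 * Simplified sketch of chunk_dealloc() after this commit.  The
 * statistics bookkeeping runs on every call; only the unmap step
 * is conditional.
 */
void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{

    assert(chunk != NULL);
    assert((size & chunksize_mask) == 0);

#ifdef JEMALLOC_STATS
    /* Always account for the chunk going away. */
    malloc_mutex_lock(&chunks_mtx);
    stats_chunks.curchunks -= (size / chunksize);
    malloc_mutex_unlock(&chunks_mtx);
#endif

    if (unmap) {
        /* Only now return the pages to swap, DSS, or the kernel. */
#ifdef JEMALLOC_SWAP
        if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
            return;
#endif
#ifdef JEMALLOC_DSS
        if (chunk_dealloc_dss(chunk, size) == false)
            return;
#endif
        chunk_dealloc_mmap(chunk, size);
    }
}

Callers that really do want the memory released (arena chunk deallocation, trimming in huge_palloc()) simply pass true, as the hunks below show.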


@@ -50,7 +50,7 @@ extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
 void *chunk_alloc(size_t size, bool base, bool *zero);
-void chunk_dealloc(void *chunk, size_t size);
+void chunk_dealloc(void *chunk, size_t size, bool unmap);
 bool chunk_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */


@@ -569,7 +569,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
             arena->ndirty -= spare->ndirty;
         }
         malloc_mutex_unlock(&arena->lock);
-        chunk_dealloc((void *)spare, chunksize);
+        chunk_dealloc((void *)spare, chunksize, true);
         malloc_mutex_lock(&arena->lock);
 #ifdef JEMALLOC_STATS
         arena->stats.mapped -= chunksize;


@@ -70,7 +70,7 @@ RETURN:
 #ifdef JEMALLOC_IVSALLOC
     if (base == false && ret != NULL) {
         if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
-            chunk_dealloc(ret, size);
+            chunk_dealloc(ret, size, true);
             return (NULL);
         }
     }
@@ -108,7 +108,7 @@ RETURN:
 }
 
 void
-chunk_dealloc(void *chunk, size_t size)
+chunk_dealloc(void *chunk, size_t size, bool unmap)
 {
 
     assert(chunk != NULL);
@@ -125,15 +125,17 @@ chunk_dealloc(void *chunk, size_t size)
     malloc_mutex_unlock(&chunks_mtx);
 #endif
 
+    if (unmap) {
 #ifdef JEMALLOC_SWAP
-    if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
-        return;
+        if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
+            return;
 #endif
 #ifdef JEMALLOC_DSS
-    if (chunk_dealloc_dss(chunk, size) == false)
-        return;
+        if (chunk_dealloc_dss(chunk, size) == false)
+            return;
 #endif
-    chunk_dealloc_mmap(chunk, size);
+        chunk_dealloc_mmap(chunk, size);
+    }
 }
 
 bool


@@ -110,12 +110,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
     if (offset == 0) {
         /* Trim trailing space. */
         chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-            - chunk_size);
+            - chunk_size, true);
     } else {
         size_t trailsize;
 
         /* Trim leading space. */
-        chunk_dealloc(ret, alignment - offset);
+        chunk_dealloc(ret, alignment - offset, true);
 
         ret = (void *)((uintptr_t)ret + (alignment - offset));
@@ -124,7 +124,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
             /* Trim trailing space. */
             assert(trailsize < alloc_size);
             chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-                trailsize);
+                trailsize, true);
         }
     }
@@ -260,7 +260,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
             if (opt_abort)
                 abort();
             memcpy(ret, ptr, copysize);
-            chunk_dealloc(ptr, oldsize);
+            chunk_dealloc_mmap(ptr, oldsize);
         }
     } else
 #endif
@@ -301,9 +301,10 @@ huge_dalloc(void *ptr, bool unmap)
             memset(node->addr, 0x5a, node->size);
 #endif
 #endif
-        chunk_dealloc(node->addr, node->size);
     }
 
+    chunk_dealloc(node->addr, node->size, unmap);
+
     base_node_dealloc(node);
 }
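
For context, the mremap(2) path in huge_ralloc() removes the old extent with huge_dalloc(ptr, false) before remapping, and only falls back to memcpy() plus a manual unmap if mremap() fails. Previously the unmap == false call skipped chunk_dealloc() entirely, so the old chunk was never subtracted from the chunk statistics even though its replacement had been counted by chunk_alloc(). The sketch below shows how the pieces line up after this commit; it is simplified and partly reproduced from memory of the 2.x sources (the diagnostic handling and the huge_salloc()/mremap() details are illustrative, not quoted from the patch).

#ifdef JEMALLOC_MREMAP_FIXED
    if (oldsize >= chunksize && copysize >= chunksize) {
        size_t newsize = huge_salloc(ret);

        /*
         * huge_dalloc(ptr, false) now reaches
         * chunk_dealloc(ptr, oldsize, false): the old chunk is dropped
         * from the statistics, but its pages are left in place for
         * mremap() to take over.
         */
        huge_dalloc(ptr, false);
        if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE |
            MREMAP_FIXED, ret) == MAP_FAILED) {
            /* ...write a diagnostic... */
            if (opt_abort)
                abort();
            memcpy(ret, ptr, copysize);
            /*
             * The statistics were already adjusted above, so only the
             * mapping itself has to be released.
             */
            chunk_dealloc_mmap(ptr, oldsize);
        }
    } else
#endif

Together with the last hunk above, which makes huge_dalloc() call chunk_dealloc(node->addr, node->size, unmap) unconditionally, this keeps the chunk statistics balanced whether the old pages are recycled by mremap(2) or actually unmapped.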