Omit chunk header in arena chunk map.
Omit the first map_bias elements of the map in arena_chunk_t. This avoids barely spilling over into an extra chunk header page for common chunk sizes.
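The diff below applies the new convention mechanically: every chunk->map[pageind] access becomes chunk->map[pageind-map_bias], nlclasses is expressed in terms of map_bias, and the arena_chunk_header_npages variable is renamed to map_bias. The bootstrap code that actually computes map_bias is not part of this excerpt; what follows is a minimal C sketch of how such a bias can be derived, assuming PAGE_SHIFT/PAGE_MASK and the offsetof() fallback added in this commit (the fixed-point loop and variable names are illustrative, not copied from the commit):

    /*
     * Illustrative sketch, not part of the diff below: compute map_bias,
     * the number of pages occupied by the arena_chunk_t header plus the
     * trailing page map.  The map length depends on map_bias itself, so
     * iterate to a fixed point; a few rounds suffice because each round
     * can only drop the handful of entries that covered header pages.
     */
    size_t header_size;
    unsigned i;

    map_bias = 0;
    for (i = 0; i < 3; i++) {
        header_size = offsetof(arena_chunk_t, map) +
            (sizeof(arena_chunk_map_t) * (chunk_npages - map_bias));
        map_bias = (header_size >> PAGE_SHIFT) +
            ((header_size & PAGE_MASK) != 0);
    }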
@@ -187,7 +187,12 @@ struct arena_chunk_s {
     /* Number of dirty pages. */
     size_t ndirty;
 
-    /* Map of pages within chunk that keeps track of free/large/small. */
+    /*
+     * Map of pages within chunk that keeps track of free/large/small.  The
+     * first map_bias entries are omitted, since the chunk header does not
+     * need to be tracked in the map.  This omission saves a header page
+     * for common chunk sizes (e.g. 4 MiB).
+     */
     arena_chunk_map_t map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
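To see the saving the new comment describes, take 4 KiB pages and a 4 MiB chunk, so chunk_npages = 1024, and assume sizeof(arena_chunk_map_t) is 32 bytes on a 64-bit system (an illustrative figure, not taken from this excerpt): a full 1024-entry map occupies exactly 8 pages on its own, so the fixed header fields before it barely spill the header into a 9th page. Omitting the map_bias entries that would have tracked the header pages shrinks the map enough that the whole header fits back into 8 pages.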
@@ -416,7 +421,7 @@ extern size_t sspace_min;
 extern size_t sspace_max;
 #define small_maxclass sspace_max
 
-#define nlclasses (chunk_npages - arena_chunk_header_npages)
+#define nlclasses (chunk_npages - map_bias)
 
 void arena_purge_all(arena_t *arena);
 #ifdef JEMALLOC_PROF
@@ -478,8 +483,8 @@ arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
     assert(ptr != NULL);
     assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-    pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
-    mapelm = &chunk->map[pageind];
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    mapelm = &chunk->map[pageind-map_bias];
     assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
     if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
         /* Small allocation. */
@@ -46,7 +46,7 @@ extern rtree_t *chunks_rtree;
 extern size_t chunksize;
 extern size_t chunksize_mask; /* (chunksize - 1). */
 extern size_t chunk_npages;
-extern size_t arena_chunk_header_npages;
+extern size_t map_bias; /* Number of arena chunk header pages. */
 extern size_t arena_maxclass; /* Max size class for arenas. */
 
 void *chunk_alloc(size_t size, bool base, bool *zero);
@@ -17,6 +17,10 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
+#include <stddef.h>
+#ifndef offsetof
+# define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
+#endif
 #include <inttypes.h>
 #include <string.h>
 #include <strings.h>
@@ -282,7 +282,8 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
         arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
         size_t pageind = (unsigned)(((uintptr_t)ret - (uintptr_t)chunk)
             >> PAGE_SHIFT);
-        chunk->map[pageind].bits |= CHUNK_MAP_CLASS_MASK;
+        chunk->map[pageind-map_bias].bits |=
+            CHUNK_MAP_CLASS_MASK;
 #endif
         if (zero == false) {
 #ifdef JEMALLOC_FILL
@@ -321,8 +322,8 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr)
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     arena = chunk->arena;
-    pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
-    mapelm = &chunk->map[pageind];
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    mapelm = &chunk->map[pageind-map_bias];
     run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
         (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
     assert(run->magic == ARENA_RUN_MAGIC);
@@ -369,8 +370,8 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 
     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     arena = chunk->arena;
-    pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT);
-    mapelm = &chunk->map[pageind];
+    pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+    mapelm = &chunk->map[pageind-map_bias];
     binind = nbins + (size >> PAGE_SHIFT) - 1;
 
 #ifdef JEMALLOC_FILL
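Taken together, the arena and tcache paths now share a single convention: the map entry for page pageind lives at chunk->map[pageind-map_bias], and pages below map_bias carry no entry because they hold the chunk header. A hypothetical helper, not part of this commit, that captures the convention in one place:

    /*
     * Hypothetical helper (name and placement are assumptions, not from
     * this commit): look up the map entry for the page containing ptr
     * under the biased indexing scheme.  Pages below map_bias hold the
     * chunk header and have no map entry, hence the assertion.
     */
    static inline arena_chunk_map_t *
    arena_mapelm_lookup(arena_chunk_t *chunk, const void *ptr)
    {
        size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
            PAGE_SHIFT;

        assert(pageind >= map_bias);
        return (&chunk->map[pageind-map_bias]);
    }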