Refactor small_size2bin and small_bin2size.

Refactor small_size2bin and small_bin2size to be inline functions rather
than directly accessed arrays.
Jason Evans 2014-04-16 17:14:33 -07:00
parent 0b49403958
commit 3541a904d6
5 changed files with 61 additions and 29 deletions
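
In short, the SMALL_SIZE2BIN() macro and direct indexing of small_size2bin[] / small_bin2size[] are replaced by always-inline wrapper functions over the renamed *_tab arrays. A condensed before/after sketch of the caller-side change, assembled from the hunks below (not a literal excerpt of any single file; the local variables are illustrative):

/* Before: macro plus direct array indexing. */
#define	SMALL_SIZE2BIN(s)	(small_size2bin[(s-1) >> LG_TINY_MIN])
size_t binind = SMALL_SIZE2BIN(size);
size_t usize = small_bin2size[binind];

/* After: always-inline functions over the renamed lookup tables. */
JEMALLOC_ALWAYS_INLINE size_t
small_size2bin(size_t size)
{
	return ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN]));
}

JEMALLOC_ALWAYS_INLINE size_t
small_bin2size(size_t binind)
{
	return ((size_t)(small_bin2size_tab[binind]));
}

size_t binind = small_size2bin(size);
size_t usize = small_bin2size(binind);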

include/jemalloc/internal/arena.h

@@ -380,13 +380,17 @@ struct arena_s {
 extern ssize_t opt_lg_dirty_mult;
 
 /*
- * small_size2bin is a compact lookup table that rounds request sizes up to
+ * small_size2bin_tab is a compact lookup table that rounds request sizes up to
  * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via the SMALL_SIZE2BIN macro.
+ * and all accesses are via small_size2bin().
  */
-extern uint8_t const small_size2bin[];
-extern uint32_t const small_bin2size[];
-#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
+extern uint8_t const small_size2bin_tab[];
+
+/*
+ * small_bin2size_tab duplicates information in arena_bin_info, but in a const
+ * array, for which it is easier for the compiler to optimize repeated
+ * dereferences.
+ */
+extern uint32_t const small_bin2size_tab[NBINS];
 
 extern arena_bin_info_t arena_bin_info[NBINS];
@@ -450,6 +454,8 @@ void arena_postfork_child(arena_t *arena);
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
+size_t small_size2bin(size_t size);
+size_t small_bin2size(size_t binind);
 arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
 size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbitsp_read(size_t *mapbitsp);
@@ -492,6 +498,22 @@ void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 # ifdef JEMALLOC_ARENA_INLINE_A
+JEMALLOC_ALWAYS_INLINE size_t
+small_size2bin(size_t size)
+{
+
+	return ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN]));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+small_bin2size(size_t binind)
+{
+
+	return ((size_t)(small_bin2size_tab[binind]));
+}
+# endif /* JEMALLOC_ARENA_INLINE_A */
+
+# ifdef JEMALLOC_ARENA_INLINE_B
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
 arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
 {
@@ -773,9 +795,9 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 	return (binind);
 }
-# endif /* JEMALLOC_ARENA_INLINE_A */
+# endif /* JEMALLOC_ARENA_INLINE_B */
 
-# ifdef JEMALLOC_ARENA_INLINE_B
+# ifdef JEMALLOC_ARENA_INLINE_C
 JEMALLOC_INLINE size_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
@@ -965,7 +987,7 @@ arena_salloc(const void *ptr, bool demote)
 		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
 		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
 		    pageind)) == binind);
-		ret = small_bin2size[binind];
+		ret = small_bin2size(binind);
 	}
 
 	return (ret);
@@ -1004,7 +1026,7 @@ arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
 			arena_dalloc_large(chunk->arena, chunk, ptr);
 	}
 }
-# endif /* JEMALLOC_ARENA_INLINE_B */
+# endif /* JEMALLOC_ARENA_INLINE_C */
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
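
The new comment in this header notes that small_bin2size_tab duplicates data already held in arena_bin_info, trading a little redundancy for a const array that the compiler can fold across repeated lookups. A hypothetical sanity check (not part of this commit) expressing that invariant:

/* Hypothetical debug check: the const table mirrors arena_bin_info. */
size_t binind;
for (binind = 0; binind < NBINS; binind++)
	assert(small_bin2size(binind) == arena_bin_info[binind].reg_size);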

include/jemalloc/internal/jemalloc_internal.h.in

@@ -499,6 +499,14 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 
+/*
+ * Include arena.h the first time in order to provide inline functions for this
+ * header's inlines.
+ */
+#define JEMALLOC_ARENA_INLINE_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_A
+
 #ifndef JEMALLOC_ENABLE_INLINE
 malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
@@ -526,7 +534,7 @@ s2u(size_t size)
 {
 
 	if (size <= SMALL_MAXCLASS)
-		return (small_bin2size[SMALL_SIZE2BIN(size)]);
+		return (small_bin2size(small_size2bin(size)));
 	if (size <= arena_maxclass)
 		return (PAGE_CEILING(size));
 	return (CHUNK_CEILING(size));
@@ -569,7 +577,7 @@ sa2u(size_t size, size_t alignment)
 	if (usize <= arena_maxclass && alignment <= PAGE) {
 		if (usize <= SMALL_MAXCLASS)
-			return (small_bin2size[SMALL_SIZE2BIN(usize)]);
+			return (small_bin2size(small_size2bin(usize)));
 		return (PAGE_CEILING(usize));
 	} else {
 		size_t run_size;
@@ -643,16 +651,16 @@ choose_arena(arena_t *arena)
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/rtree.h"
 
 /*
- * Include arena.h twice in order to resolve circular dependencies with
- * tcache.h.
+ * Include arena.h the second and third times in order to resolve circular
+ * dependencies with tcache.h.
  */
-#define JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/tcache.h"
 #define JEMALLOC_ARENA_INLINE_B
 #include "jemalloc/internal/arena.h"
 #undef JEMALLOC_ARENA_INLINE_B
+#include "jemalloc/internal/tcache.h"
+#define JEMALLOC_ARENA_INLINE_C
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_C
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
@@ -794,7 +802,7 @@ u2rz(size_t usize)
 	size_t ret;
 
 	if (usize <= SMALL_MAXCLASS) {
-		size_t binind = SMALL_SIZE2BIN(usize);
+		size_t binind = small_size2bin(usize);
 		ret = arena_bin_info[binind].redzone_size;
 	} else
 		ret = 0;
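
The three gated includes above work because arena.h only emits the inline group whose pass macro is currently defined, so the header can be pulled in repeatedly, interleaved with tcache.h, without redefinitions. A schematic of the gating, with the group contents paraphrased from the arena.h hunks earlier in this commit:

/* arena.h (schematic of the pass gating; grouping per the hunks above) */
#ifdef JEMALLOC_ARENA_INLINE_A
	/* pass A: small_size2bin(), small_bin2size() -- used by s2u()/sa2u()/u2rz() */
#endif
#ifdef JEMALLOC_ARENA_INLINE_B
	/* pass B: arena_mapp_get() through arena_ptr_small_binind_get() */
#endif
#ifdef JEMALLOC_ARENA_INLINE_C
	/* pass C: arena_bin_index() through arena_dalloc(), included after tcache.h */
#endif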

include/jemalloc/internal/private_symbols.txt

@@ -346,7 +346,9 @@ s2u
 sa2u
 set_errno
 small_bin2size
+small_bin2size_tab
 small_size2bin
+small_size2bin_tab
 stats_cactive
 stats_cactive_add
 stats_cactive_get

include/jemalloc/internal/tcache.h

@@ -263,10 +263,10 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 	size_t binind;
 	tcache_bin_t *tbin;
 
-	binind = SMALL_SIZE2BIN(size);
+	binind = small_size2bin(size);
 	assert(binind < NBINS);
 	tbin = &tcache->tbins[binind];
-	size = small_bin2size[binind];
+	size = small_bin2size(binind);
 	ret = tcache_alloc_easy(tbin);
 	if (ret == NULL) {
 		ret = tcache_alloc_small_hard(tcache, tbin, binind);

src/arena.c

@@ -8,7 +8,7 @@ ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
 arena_bin_info_t arena_bin_info[NBINS];
 
 JEMALLOC_ALIGNED(CACHELINE)
-const uint32_t small_bin2size[NBINS] = {
+const uint32_t small_bin2size_tab[NBINS] = {
 #define SIZE_CLASS(bin, delta, size) \
 	size,
 	SIZE_CLASSES
@@ -16,7 +16,7 @@ const uint32_t small_bin2size[NBINS] = {
 };
 
 JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
+const uint8_t small_size2bin_tab[] = {
 #define S2B_8(i) i,
 #define S2B_16(i) S2B_8(i) S2B_8(i)
 #define S2B_32(i) S2B_16(i) S2B_16(i)
@@ -1607,7 +1607,7 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
 	assert(opt_quarantine);
 	assert(usize <= SMALL_MAXCLASS);
 
-	binind = SMALL_SIZE2BIN(usize);
+	binind = small_size2bin(usize);
 	bin_info = &arena_bin_info[binind];
 	arena_redzones_validate(ptr, bin_info, true);
 }
@@ -1620,10 +1620,10 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 	arena_run_t *run;
 	size_t binind;
 
-	binind = SMALL_SIZE2BIN(size);
+	binind = small_size2bin(size);
 	assert(binind < NBINS);
 	bin = &arena->bins[binind];
-	size = small_bin2size[binind];
+	size = small_bin2size(binind);
 
 	malloc_mutex_lock(&bin->lock);
 	if ((run = bin->runcur) != NULL && run->nfree > 0)
@@ -1777,7 +1777,7 @@ arena_prof_promoted(const void *ptr, size_t size)
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	binind = SMALL_SIZE2BIN(size);
+	binind = small_size2bin(size);
 	assert(binind < NBINS);
 	arena_mapbits_large_binind_set(chunk, pageind, binind);
@@ -2164,11 +2164,11 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 */
 	if (oldsize <= arena_maxclass) {
 		if (oldsize <= SMALL_MAXCLASS) {
-			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
+			assert(arena_bin_info[small_size2bin(oldsize)].reg_size
 			    == oldsize);
 			if ((size + extra <= SMALL_MAXCLASS &&
-			    SMALL_SIZE2BIN(size + extra) ==
-			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
+			    small_size2bin(size + extra) ==
+			    small_size2bin(oldsize)) || (size <= oldsize &&
 			    size + extra >= oldsize))
 				return (false);
 		} else {