Refactor small_size2bin and small_bin2size.
Refactor small_size2bin and small_bin2size to be inline functions rather than directly accessed arrays.
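For orientation, the sketch below shows the shape of the change in isolation: a compressed request-size-to-bin table indexed by (size-1) >> LG_TINY_MIN and a const bin-to-size table, each wrapped in an always-inline accessor instead of the old SMALL_SIZE2BIN() macro and direct array indexing. The LG_TINY_MIN/NBINS values and the table contents below are illustrative stand-ins, not jemalloc's real size-class configuration.

/*
 * Standalone sketch of the new accessors (illustrative values only; the real
 * LG_TINY_MIN, NBINS, and table contents come from jemalloc's size-class
 * configuration, not from this example).
 */
#include <stdint.h>
#include <stdio.h>

#define LG_TINY_MIN 3   /* assume the smallest size class is 8 bytes */
#define NBINS 4         /* assume four small bins: 8, 16, 32, 64 bytes */

/* One byte per LG_TINY_MIN-sized step of request size, mapping to a bin. */
static const uint8_t small_size2bin_tab[] = {
	0,              /* sizes 1..8   -> bin 0 */
	1,              /* sizes 9..16  -> bin 1 */
	2, 2,           /* sizes 17..32 -> bin 2 */
	3, 3, 3, 3      /* sizes 33..64 -> bin 3 */
};
/* One entry per bin, mapping a bin index back to its size class. */
static const uint32_t small_bin2size_tab[NBINS] = {8, 16, 32, 64};

/* Replaces the old SMALL_SIZE2BIN(s) macro. */
static inline size_t
small_size2bin(size_t size)
{
	return ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN]));
}

/* Replaces direct indexing of the old small_bin2size[] array. */
static inline size_t
small_bin2size(size_t binind)
{
	return ((size_t)(small_bin2size_tab[binind]));
}

int
main(void)
{
	/* A 24-byte request: (24-1)>>3 == 2, so bin 2, rounded up to 32. */
	size_t binind = small_size2bin(24);

	printf("request 24 -> bin %zu -> %zu bytes\n", binind,
	    small_bin2size(binind));
	return (0);
}

Compared with the macro, the inline form type-checks its argument and evaluates it exactly once, and since the tables in this sketch are const the compiler can still fold the loads at call sites.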
@@ -380,13 +380,17 @@ struct arena_s {
 
 extern ssize_t opt_lg_dirty_mult;
 /*
- * small_size2bin is a compact lookup table that rounds request sizes up to
+ * small_size2bin_tab is a compact lookup table that rounds request sizes up to
  * size classes. In order to reduce cache footprint, the table is compressed,
- * and all accesses are via the SMALL_SIZE2BIN macro.
+ * and all accesses are via small_size2bin().
  */
-extern uint8_t const small_size2bin[];
-extern uint32_t const small_bin2size[];
-#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
+extern uint8_t const small_size2bin_tab[];
+/*
+ * small_bin2size_tab duplicates information in arena_bin_info, but in a const
+ * array, for which it is easier for the compiler to optimize repeated
+ * dereferences.
+ */
+extern uint32_t const small_bin2size_tab[NBINS];
 
 extern arena_bin_info_t arena_bin_info[NBINS];
 
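The new comment above motivates small_bin2size_tab by noting that a const duplicate of arena_bin_info is easier for the compiler to optimize. A small standalone illustration of that point follows; the struct is trimmed to one field and the sizes are invented, so treat it as a sketch of the reasoning rather than jemalloc's actual definitions.

#include <stddef.h>
#include <stdint.h>

#define NBINS 4

typedef struct {
	size_t reg_size;	/* stand-in for the real arena_bin_info_t */
} arena_bin_info_t;

/* Mutable global: its contents are unknown at compile time. */
arena_bin_info_t arena_bin_info[NBINS];

/* Const table with a visible initializer: its contents are compile-time facts. */
static const uint32_t small_bin2size_tab[NBINS] = {8, 16, 32, 64};

size_t
sum_via_struct(void)
{
	size_t i, sum = 0;

	/* These loads cannot be folded away; another translation unit may
	 * have stored different values into arena_bin_info. */
	for (i = 0; i < NBINS; i++)
		sum += arena_bin_info[i].reg_size;
	return (sum);
}

size_t
sum_via_table(void)
{
	size_t i, sum = 0;

	/* The compiler is free to evaluate this at compile time and simply
	 * return the constant 120. */
	for (i = 0; i < NBINS; i++)
		sum += small_bin2size_tab[i];
	return (sum);
}

The same reasoning applies per element: in this sketch small_bin2size_tab[binind] can become a constant when binind is known, whereas arena_bin_info[binind].reg_size generally cannot.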
@@ -450,6 +454,8 @@ void arena_postfork_child(arena_t *arena);
 #ifdef JEMALLOC_H_INLINES
 
 #ifndef JEMALLOC_ENABLE_INLINE
+size_t small_size2bin(size_t size);
+size_t small_bin2size(size_t binind);
 arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
 size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
 size_t arena_mapbitsp_read(size_t *mapbitsp);
@@ -492,6 +498,22 @@ void arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
 # ifdef JEMALLOC_ARENA_INLINE_A
+JEMALLOC_ALWAYS_INLINE size_t
+small_size2bin(size_t size)
+{
+
+	return ((size_t)(small_size2bin_tab[(size-1) >> LG_TINY_MIN]));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+small_bin2size(size_t binind)
+{
+
+	return ((size_t)(small_bin2size_tab[binind]));
+}
+# endif /* JEMALLOC_ARENA_INLINE_A */
+
+# ifdef JEMALLOC_ARENA_INLINE_B
 JEMALLOC_ALWAYS_INLINE arena_chunk_map_t *
 arena_mapp_get(arena_chunk_t *chunk, size_t pageind)
 {
@@ -773,9 +795,9 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
 
 	return (binind);
 }
-# endif /* JEMALLOC_ARENA_INLINE_A */
+# endif /* JEMALLOC_ARENA_INLINE_B */
 
-# ifdef JEMALLOC_ARENA_INLINE_B
+# ifdef JEMALLOC_ARENA_INLINE_C
 JEMALLOC_INLINE size_t
 arena_bin_index(arena_t *arena, arena_bin_t *bin)
 {
@@ -965,7 +987,7 @@ arena_salloc(const void *ptr, bool demote)
 		assert(arena_mapbits_large_get(chunk, pageind) != 0 ||
 		    arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
 		    pageind)) == binind);
-		ret = small_bin2size[binind];
+		ret = small_bin2size(binind);
 	}
 
 	return (ret);
@@ -1004,7 +1026,7 @@ arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
 			arena_dalloc_large(chunk->arena, chunk, ptr);
 	}
 }
-# endif /* JEMALLOC_ARENA_INLINE_B */
+# endif /* JEMALLOC_ARENA_INLINE_C */
 #endif
 
 #endif /* JEMALLOC_H_INLINES */
@@ -499,6 +499,14 @@ void jemalloc_postfork_child(void);
 #include "jemalloc/internal/chunk.h"
 #include "jemalloc/internal/huge.h"
 
+/*
+ * Include arena.h the first time in order to provide inline functions for this
+ * header's inlines.
+ */
+#define JEMALLOC_ARENA_INLINE_A
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_A
+
 #ifndef JEMALLOC_ENABLE_INLINE
 malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
 
@@ -526,7 +534,7 @@ s2u(size_t size)
 {
 
 	if (size <= SMALL_MAXCLASS)
-		return (small_bin2size[SMALL_SIZE2BIN(size)]);
+		return (small_bin2size(small_size2bin(size)));
 	if (size <= arena_maxclass)
 		return (PAGE_CEILING(size));
 	return (CHUNK_CEILING(size));
@@ -569,7 +577,7 @@ sa2u(size_t size, size_t alignment)
 
 	if (usize <= arena_maxclass && alignment <= PAGE) {
 		if (usize <= SMALL_MAXCLASS)
-			return (small_bin2size[SMALL_SIZE2BIN(usize)]);
+			return (small_bin2size(small_size2bin(usize)));
 		return (PAGE_CEILING(usize));
 	} else {
 		size_t run_size;
@@ -643,16 +651,16 @@ choose_arena(arena_t *arena)
 #include "jemalloc/internal/bitmap.h"
 #include "jemalloc/internal/rtree.h"
 /*
- * Include arena.h twice in order to resolve circular dependencies with
- * tcache.h.
+ * Include arena.h the second and third times in order to resolve circular
+ * dependencies with tcache.h.
  */
-#define JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/arena.h"
-#undef JEMALLOC_ARENA_INLINE_A
-#include "jemalloc/internal/tcache.h"
 #define JEMALLOC_ARENA_INLINE_B
 #include "jemalloc/internal/arena.h"
 #undef JEMALLOC_ARENA_INLINE_B
+#include "jemalloc/internal/tcache.h"
+#define JEMALLOC_ARENA_INLINE_C
+#include "jemalloc/internal/arena.h"
+#undef JEMALLOC_ARENA_INLINE_C
 #include "jemalloc/internal/hash.h"
 #include "jemalloc/internal/quarantine.h"
 
@@ -794,7 +802,7 @@ u2rz(size_t usize)
 	size_t ret;
 
 	if (usize <= SMALL_MAXCLASS) {
-		size_t binind = SMALL_SIZE2BIN(usize);
+		size_t binind = small_size2bin(usize);
 		ret = arena_bin_info[binind].redzone_size;
 	} else
 		ret = 0;
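The JEMALLOC_ARENA_INLINE_A/B/C sequence above is a phased-include pattern: arena.h guards groups of its inline functions behind per-group macros so that jemalloc_internal.h can pull them in one group at a time, interleaved with tcache.h, which breaks the circular dependency between the two headers; the new first pass exists so that s2u(), sa2u(), and u2rz() can call small_size2bin()/small_bin2size() before tcache.h is ever seen. The following is a hypothetical two-header sketch of the same idea, with file boundaries marked in comments and every name invented for illustration.

/* ---- widget.h (hypothetical; no include guard, meant to be included twice) ---- */
#ifdef WIDGET_INLINE_A
static inline int
widget_id(void)			/* group A: depends on nothing */
{
	return (42);
}
#endif

#ifdef WIDGET_INLINE_B
static inline int
widget_tagged_id(void)		/* group B: needs gadget_tag() from gadget.h */
{
	return (gadget_tag() + widget_id());
}
#endif

/* ---- gadget.h (hypothetical) ---- */
static inline int
gadget_tag(void)		/* needs only group A of widget.h */
{
	return (widget_id() * 1000);
}

/* ---- main.c (hypothetical) ---- */
#include <stdio.h>

#define WIDGET_INLINE_A
#include "widget.h"		/* first pass: only group A is emitted */
#undef WIDGET_INLINE_A
#include "gadget.h"		/* gadget.h can now call widget_id() */
#define WIDGET_INLINE_B
#include "widget.h"		/* second pass: group B can now call gadget_tag() */
#undef WIDGET_INLINE_B

int
main(void)
{
	printf("%d\n", widget_tagged_id());	/* prints 42042 */
	return (0);
}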
@@ -346,7 +346,9 @@ s2u
 sa2u
 set_errno
 small_bin2size
+small_bin2size_tab
 small_size2bin
+small_size2bin_tab
 stats_cactive
 stats_cactive_add
 stats_cactive_get
@@ -263,10 +263,10 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 	size_t binind;
 	tcache_bin_t *tbin;
 
-	binind = SMALL_SIZE2BIN(size);
+	binind = small_size2bin(size);
 	assert(binind < NBINS);
 	tbin = &tcache->tbins[binind];
-	size = small_bin2size[binind];
+	size = small_bin2size(binind);
 	ret = tcache_alloc_easy(tbin);
 	if (ret == NULL) {
 		ret = tcache_alloc_small_hard(tcache, tbin, binind);