Reduce size of small_size2bin lookup table.

Convert all direct small_size2bin[...] accesses to SMALL_SIZE2BIN(...)
macro calls, and use a couple of cheap math operations to allow
compacting the table by 4X or 8X, on 32- and 64-bit systems,
respectively.
Author: Jason Evans
Date: 2011-03-06 22:56:36 -08:00
parent ff7450727f
commit 41ade967c2
4 changed files with 52 additions and 41 deletions
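A standalone sketch (not part of this commit) of the compaction idea, assuming LG_TINY_MIN == LG_SIZEOF_PTR == 3 as on a 64-bit system: indexing the table by ((size - 1) >> LG_TINY_MIN) instead of by the raw size means one table entry covers each TINY_MIN-byte group of request sizes, so the table shrinks by 2^LG_TINY_MIN (8X here, 4X when LG_TINY_MIN == 2 on 32-bit). The demo_size2bin table and DEMO_SIZE2BIN macro are hypothetical names used only for illustration; the bin spacing is made up.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define LG_TINY_MIN 3                   /* Assumption: LG_SIZEOF_PTR on a 64-bit system. */
#define TINY_MIN    (1U << LG_TINY_MIN)

/* Hypothetical compressed table: one bin index per TINY_MIN-byte group of sizes. */
static const uint8_t demo_size2bin[] = {
    0,              /* sizes  1..8  -> bin 0 */
    1,              /* sizes  9..16 -> bin 1 */
    2, 2,           /* sizes 17..32 -> bin 2 */
    3, 3, 3, 3      /* sizes 33..64 -> bin 3 */
};

/* Same index math as the SMALL_SIZE2BIN() macro introduced in the diff below. */
#define DEMO_SIZE2BIN(s) (demo_size2bin[((s) - 1) >> LG_TINY_MIN])

int
main(void)
{
    size_t s;

    /* Every size within a TINY_MIN-byte group maps to the same bin index. */
    for (s = 1; s <= 64; s++)
        printf("size %2zu -> bin %d\n", s, DEMO_SIZE2BIN(s));
    return (0);
}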

@@ -19,6 +19,7 @@
#ifdef JEMALLOC_TINY
/* Smallest size class to support. */
# define LG_TINY_MIN LG_SIZEOF_PTR
+# define TINY_MIN (1U << LG_TINY_MIN)
#endif
/*
@@ -389,7 +390,13 @@ struct arena_s {
extern size_t opt_lg_qspace_max;
extern size_t opt_lg_cspace_max;
extern ssize_t opt_lg_dirty_mult;
+/*
+ * small_size2bin is a compact lookup table that rounds request sizes up to
+ * size classes. In order to reduce cache footprint, the table is compressed,
+ * and all accesses are via the SMALL_SIZE2BIN macro.
+ */
extern uint8_t const *small_size2bin;
+#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN])
/* Various bin-related settings. */
#ifdef JEMALLOC_TINY /* Number of (2^n)-spaced tiny bins. */
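A quick illustration (mine, not from the diff) of why the macro subtracts 1 before shifting, again assuming LG_TINY_MIN == 3: the "- 1" keeps a request of exactly TINY_MIN bytes in group 0, so each table entry covers the half-open size range (k*TINY_MIN, (k+1)*TINY_MIN], which is the "round request sizes up to size classes" behavior the new comment describes. The size2bin_index() helper below is a hypothetical stand-in for the macro body.

#include <assert.h>
#include <stddef.h>

#define LG_TINY_MIN 3                   /* Assumption: 64-bit, LG_SIZEOF_PTR == 3. */
#define TINY_MIN    (1U << LG_TINY_MIN)

/* Index math from SMALL_SIZE2BIN(), pulled out as a function for clarity. */
static size_t
size2bin_index(size_t s)
{
    return ((s - 1) >> LG_TINY_MIN);
}

int
main(void)
{
    assert(size2bin_index(1) == 0);                /* 1 byte: group 0. */
    assert(size2bin_index(TINY_MIN) == 0);         /* exactly 8 bytes: still group 0. */
    assert(size2bin_index(TINY_MIN + 1) == 1);     /* 9 bytes: next group. */
    assert(size2bin_index(2 * TINY_MIN) == 1);     /* 16 bytes: group 1. */
    return (0);
}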

@@ -402,7 +402,7 @@ s2u(size_t size)
{
if (size <= small_maxclass)
-return (arenas[0]->bins[small_size2bin[size]].reg_size);
+return (arenas[0]->bins[SMALL_SIZE2BIN(size)].reg_size);
if (size <= arena_maxclass)
return (PAGE_CEILING(size));
return (CHUNK_CEILING(size));
@@ -448,7 +448,7 @@ sa2u(size_t size, size_t alignment, size_t *run_size_p)
if (usize <= arena_maxclass && alignment <= PAGE_SIZE) {
if (usize <= small_maxclass) {
return
-(arenas[0]->bins[small_size2bin[usize]].reg_size);
+(arenas[0]->bins[SMALL_SIZE2BIN(usize)].reg_size);
}
return (PAGE_CEILING(usize));
} else {

@@ -223,7 +223,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
size_t binind;
tcache_bin_t *tbin;
-binind = small_size2bin[size];
+binind = SMALL_SIZE2BIN(size);
assert(binind < nbins);
tbin = &tcache->tbins[binind];
ret = tcache_alloc_easy(tbin);