SC: Remove global data.

The global data is mostly used only at initialization, or for easy access to
values we could compute statically.  Instead of consuming that space (and
risking TLB misses), we can just pass around a pointer to stack data during
bootstrapping.
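
As a rough illustration of the pattern, here is a minimal, self-contained
sketch with toy types and values; sc_data_t, sc_data_init(), and sz_boot()
below are simplified stand-ins, not the real jemalloc definitions.  The caller
builds the size-class data in a stack-allocated struct and hands a pointer to
each consumer, which copies or derives what it needs into its own static
state:

#include <stddef.h>
#include <stdio.h>

#define NCLASSES 8

/* Toy stand-in for sc_data_t: just a table of size classes. */
typedef struct {
	size_t sizes[NCLASSES];
} sc_data_t;

/* Stand-in for sc_data_init(): compute the table once (doubling classes). */
static void
sc_data_init(sc_data_t *data) {
	size_t sz = 8;
	for (int i = 0; i < NCLASSES; i++, sz *= 2) {
		data->sizes[i] = sz;
	}
}

/*
 * Stand-in for a consumer like sz_boot(): copy what it needs into its own
 * static tables; nothing keeps pointing at the bootstrap data afterwards.
 */
static size_t sz_index2size_tab[NCLASSES];

static void
sz_boot(const sc_data_t *data) {
	for (int i = 0; i < NCLASSES; i++) {
		sz_index2size_tab[i] = data->sizes[i];
	}
}

int
main(void) {
	sc_data_t sc_data;	/* lives only on the bootstrap stack */
	sc_data_init(&sc_data);
	sz_boot(&sc_data);	/* pointer passed around; no global sc data */
	printf("largest toy class: %zu bytes\n", sz_index2size_tab[NCLASSES - 1]);
	return 0;
}

Once bootstrap returns, nothing references the stack copy, so there is no
resident global left to occupy a cache line or TLB entry on ordinary
allocation paths.
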
Author:    David Goldblatt
Date:      2018-07-19 17:08:10 -07:00
Committer: David Goldblatt
Parent:    4bc48718b2
Commit:    3aba072cef

16 changed files with 73 additions and 73 deletions

@@ -85,7 +85,7 @@ size_t arena_extent_sn_next(arena_t *arena);
 arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
 bool arena_init_huge(void);
 arena_t *arena_choose_huge(tsd_t *tsd);
-void arena_boot(void);
+void arena_boot(sc_data_t *sc_data);
 void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
 void arena_prefork2(tsdn_t *tsdn, arena_t *arena);

@@ -181,14 +181,14 @@ struct extents_s {
	 *
	 * Synchronization: mtx.
	 */
-	extent_heap_t heaps[SC_NPSIZES_MAX + 1];
+	extent_heap_t heaps[SC_NPSIZES + 1];
	/*
	 * Bitmap for which set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
-	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES_MAX + 1)];
+	bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
	/*
	 * LRU of all extents in heaps.

@@ -182,6 +182,7 @@
 #define SC_NGROUP (1ULL << SC_LG_NGROUP)
 #define SC_PTR_BITS ((1ULL << LG_SIZEOF_PTR) * 8)
 #define SC_NTINY (LG_QUANTUM - SC_LG_TINY_MIN)
+#define SC_LG_TINY_MAXCLASS (LG_QUANTUM > SC_LG_TINY_MIN ? LG_QUANTUM - 1 : -1)
 #define SC_NPSEUDO SC_NGROUP
 #define SC_LG_FIRST_REGULAR_BASE (LG_QUANTUM + SC_LG_NGROUP)
 /*
@@ -200,7 +201,7 @@
  * because delta may be smaller than a page, this is not the same as the number
  * of size classes that are *multiples* of the page size.
  */
-#define SC_NPSIZES_MAX ( \
+#define SC_NPSIZES ( \
     /* Start with all the size classes. */ \
     SC_NSIZES \
     /* Subtract out those groups with too small a base. */ \
@@ -209,11 +210,8 @@
     - SC_NPSEUDO \
     /* And the tiny group. */ \
     - SC_NTINY \
-    /* \
-     * In the lg_base == lg_page - 1 group, only the last sc is big \
-     * enough to make it to lg_page. \
-     */ \
-    - (SC_NGROUP - 1))
+    /* Groups where ndelta*delta is not a multiple of the page size. */ \
+    - (2 * (SC_NGROUP)))
 /*
  * We declare a size class is binnable if size < page size * group. Or, in other
@@ -314,7 +312,6 @@ struct sc_data_s {
	sc_t sc[SC_NSIZES];
 };
-extern sc_data_t sc_data_global;
 void sc_data_init(sc_data_t *data);
 /*
  * Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
@@ -322,6 +319,6 @@ void sc_data_init(sc_data_t *data);
  */
 void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
     int pgs);
-void sc_boot();
+void sc_boot(sc_data_t *data);
 #endif /* JEMALLOC_INTERNAL_SC_H */

@@ -26,7 +26,7 @@
  * sz_pind2sz_tab encodes the same information as could be computed by
  * sz_pind2sz_compute().
  */
-extern size_t sz_pind2sz_tab[SC_NPSIZES_MAX + 1];
+extern size_t sz_pind2sz_tab[SC_NPSIZES + 1];
 /*
  * sz_index2size_tab encodes the same information as could be computed (at
  * unacceptable cost in some code paths) by sz_index2size_compute().
@@ -52,7 +52,7 @@ extern void sz_boot(const sc_data_t *sc_data);
 JEMALLOC_ALWAYS_INLINE pszind_t
 sz_psz2ind(size_t psz) {
	if (unlikely(psz > SC_LARGE_MAXCLASS)) {
-		return sc_data_global.npsizes;
+		return SC_NPSIZES;
	}
	pszind_t x = lg_floor((psz<<1)-1);
	pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
@@ -72,7 +72,7 @@ sz_psz2ind(size_t psz) {
 static inline size_t
 sz_pind2sz_compute(pszind_t pind) {
-	if (unlikely(pind == sc_data_global.npsizes)) {
+	if (unlikely(pind == SC_NPSIZES)) {
		return SC_LARGE_MAXCLASS + PAGE;
	}
	size_t grp = pind >> SC_LG_NGROUP;
@@ -99,7 +99,7 @@ sz_pind2sz_lookup(pszind_t pind) {
 static inline size_t
 sz_pind2sz(pszind_t pind) {
-	assert(pind < sc_data_global.npsizes + 1);
+	assert(pind < SC_NPSIZES + 1);
	return sz_pind2sz_lookup(pind);
 }
@@ -123,9 +123,8 @@ sz_size2index_compute(size_t size) {
		return SC_NSIZES;
	}
 #if (SC_NTINY != 0)
-	if (size <= (ZU(1) << sc_data_global.lg_tiny_maxclass)) {
-		szind_t lg_tmin = sc_data_global.lg_tiny_maxclass
-		    - sc_data_global.ntiny + 1;
+	if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
+		szind_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	}
@@ -143,7 +142,7 @@ sz_size2index_compute(size_t size) {
		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << SC_LG_NGROUP) - 1);
-		szind_t index = sc_data_global.ntiny + grp + mod;
+		szind_t index = SC_NTINY + grp + mod;
		return index;
	}
 }
@@ -168,13 +167,12 @@ sz_size2index(size_t size) {
 static inline size_t
 sz_index2size_compute(szind_t index) {
 #if (SC_NTINY > 0)
-	if (index < sc_data_global.ntiny) {
-		return (ZU(1) << (sc_data_global.lg_tiny_maxclass
-		    - sc_data_global.ntiny + 1 + index));
+	if (index < SC_NTINY) {
+		return (ZU(1) << (SC_LG_TINY_MAXCLASS - SC_NTINY + 1 + index));
	}
 #endif
	{
-		size_t reduced_index = index - sc_data_global.ntiny;
+		size_t reduced_index = index - SC_NTINY;
		size_t grp = reduced_index >> SC_LG_NGROUP;
		size_t mod = reduced_index & ((ZU(1) << SC_LG_NGROUP) -
		    1);
@@ -211,9 +209,8 @@ sz_s2u_compute(size_t size) {
		return 0;
	}
 #if (SC_NTINY > 0)
-	if (size <= (ZU(1) << sc_data_global.lg_tiny_maxclass)) {
-		size_t lg_tmin = sc_data_global.lg_tiny_maxclass
-		    - sc_data_global.ntiny + 1;
+	if (size <= (ZU(1) << SC_LG_TINY_MAXCLASS)) {
+		size_t lg_tmin = SC_LG_TINY_MAXCLASS - SC_NTINY + 1;
		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));