Refactor runs_avail.
Use pszind_t size classes rather than szind_t size classes, and always reserve space for NPSIZES elements. This removes unused heaps that are not multiples of the page size, and adds (currently) unused heaps for all huge size classes, with the immediate benefit that the size of arena_t allocations is constant (no longer dependent on chunk size).
commit 7bb00ae9d6
parent 226c446979
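
Before the diff, a minimal self-contained sketch of the data-structure change (illustrative only, not jemalloc code: the heap type, the NPSIZES value, and the psz2ind()/pind2sz() mappings are mocked as simple page multiples). The arena keeps a fixed array of NPSIZES run heaps indexed by page-size class, and first-best-fit starts at the heap for the quantized request size and scans upward, mirroring what arena_run_first_best_fit() does in the diff below.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_PAGE	12
#define PAGE	((size_t)1 << LG_PAGE)
#define NPSIZES	8	/* mocked; the real value comes from size_classes.sh */

typedef unsigned pszind_t;

typedef struct {	/* mock stand-in for arena_run_heap_t */
	size_t nruns;
} arena_run_heap_t;

typedef struct {
	/* Fixed-size array: one heap per page-size class (new arena_s layout). */
	arena_run_heap_t runs_avail[NPSIZES];
} arena_t;

/* Mocked psz2ind()/pind2sz(): page-size class i covers (i + 1) pages. */
static pszind_t
psz2ind(size_t psz)
{
	return ((pszind_t)((psz >> LG_PAGE) - 1));
}

static size_t
pind2sz(pszind_t pind)
{
	return (((size_t)pind + 1) << LG_PAGE);
}

/* First-best-fit: start at the heap for the quantized size, scan upward. */
static arena_run_heap_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	pszind_t i;

	assert(size >= PAGE && (size & (PAGE - 1)) == 0);
	for (i = psz2ind(size); i < NPSIZES; i++) {
		if (arena->runs_avail[i].nruns > 0)
			return (&arena->runs_avail[i]);
	}
	return (NULL);
}

int
main(void)
{
	arena_t arena = {{{0}}};
	arena_run_heap_t *heap;

	arena.runs_avail[psz2ind(3 * PAGE)].nruns = 1;	/* one free 3-page run */
	/* A 2-page request falls through to the first non-empty larger heap. */
	heap = arena_run_first_best_fit(&arena, 2 * PAGE);
	assert(heap != NULL);
	printf("best fit comes from the %zu-byte heap\n",
	    pind2sz((pszind_t)(heap - arena.runs_avail)));
	return (0);
}

Because the array length is a compile-time constant, sizeof(arena_t) is fixed, which is what lets arena_new() below drop the runs_avail_nclasses-dependent arena_size computation.
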
@@ -441,10 +441,12 @@ struct arena_s {
 	arena_bin_t		bins[NBINS];
 
 	/*
-	 * Quantized address-ordered heaps of this arena's available runs.  The
-	 * heaps are used for first-best-fit run allocation.
+	 * Size-segregated address-ordered heaps of this arena's available runs,
+	 * used for first-best-fit run allocation.  Runs are quantized, i.e.
+	 * they reside in the last heap which corresponds to a size class less
+	 * than or equal to the run size.
 	 */
-	arena_run_heap_t	runs_avail[1]; /* Dynamically sized. */
+	arena_run_heap_t	runs_avail[NPSIZES];
 };
 
 /* Used in conjunction with tsd for fast arena-related context lookup. */
@@ -476,7 +478,6 @@ extern size_t	map_bias; /* Number of arena chunk header pages. */
 extern size_t	map_misc_offset;
 extern size_t	arena_maxrun; /* Max run size for arenas. */
 extern size_t	large_maxclass; /* Max large size class. */
-extern size_t	run_quantize_max; /* Max run_quantize_*() input. */
 extern unsigned	nlclasses; /* Number of large size classes. */
 extern unsigned	nhclasses; /* Number of huge size classes. */
 
@@ -443,11 +443,16 @@ extern unsigned	narenas_auto;
  */
 extern arena_t	**arenas;
 
+/*
+ * pind2sz_tab encodes the same information as could be computed by
+ * pind2sz_compute().
+ */
+extern size_t const	pind2sz_tab[NPSIZES];
 /*
  * index2size_tab encodes the same information as could be computed (at
  * unacceptable cost in some code paths) by index2size_compute().
  */
-extern size_t const	index2size_tab[NSIZES+1];
+extern size_t const	index2size_tab[NSIZES];
 /*
  * size2index_tab is a compact lookup table that rounds request sizes up to
  * size classes.  In order to reduce cache footprint, the table is compressed,
@@ -529,6 +534,8 @@ void	jemalloc_postfork_child(void);
 
 #ifndef JEMALLOC_ENABLE_INLINE
 pszind_t	psz2ind(size_t psz);
+size_t	pind2sz_compute(pszind_t pind);
+size_t	pind2sz_lookup(pszind_t pind);
 size_t	pind2sz(pszind_t pind);
 size_t	psz2u(size_t psz);
 szind_t	size2index_compute(size_t size);
@@ -576,7 +583,7 @@ psz2ind(size_t psz)
 }
 
 JEMALLOC_INLINE size_t
-pind2sz(pszind_t pind)
+pind2sz_compute(pszind_t pind)
 {
 
 	{
@@ -596,6 +603,22 @@ pind2sz(pszind_t pind)
 	}
 }
 
+JEMALLOC_INLINE size_t
+pind2sz_lookup(pszind_t pind)
+{
+	size_t ret = (size_t)pind2sz_tab[pind];
+	assert(ret == pind2sz_compute(pind));
+	return (ret);
+}
+
+JEMALLOC_INLINE size_t
+pind2sz(pszind_t pind)
+{
+
+	assert(pind < NPSIZES);
+	return (pind2sz_lookup(pind));
+}
+
 JEMALLOC_INLINE size_t
 psz2u(size_t psz)
 {
@@ -394,6 +394,8 @@ pages_purge
 pages_trim
 pages_unmap
 pind2sz
+pind2sz_compute
+pind2sz_lookup
 pow2_ceil_u32
 pow2_ceil_u64
 pow2_ceil_zu
@@ -468,7 +470,6 @@ rtree_val_read
 rtree_val_write
 run_quantize_ceil
 run_quantize_floor
-run_quantize_max
 s2u
 s2u_compute
 s2u_lookup
@@ -40,6 +40,16 @@ lg() {
   done
 }
 
+reg_size_compute() {
+  lg_grp=$1
+  lg_delta=$2
+  ndelta=$3
+
+  pow2 ${lg_grp}; grp=${pow2_result}
+  pow2 ${lg_delta}; delta=${pow2_result}
+  reg_size=$((${grp} + ${delta}*${ndelta}))
+}
+
 run_size() {
   lg_p=$1
   lg_grp=$2
@@ -47,10 +57,7 @@ run_size() {
   ndelta=$4
 
   pow2 ${lg_p}; p=${pow2_result}
-
-  pow2 ${lg_grp}; grp=${pow2_result}
-  pow2 ${lg_delta}; delta=${pow2_result}
-  reg_size=$((${grp} + ${delta}*${ndelta}))
+  reg_size_compute ${lg_grp} ${lg_delta} ${ndelta}
 
   # Compute smallest run size that is an integer multiple of reg_size.
   try_run_size=${p}
src/arena.c
@@ -34,14 +34,11 @@ size_t		map_bias;
 size_t		map_misc_offset;
 size_t		arena_maxrun; /* Max run size for arenas. */
 size_t		large_maxclass; /* Max large size class. */
-size_t		run_quantize_max; /* Max run_quantize_*() input. */
 static bool	*small_run_tab; /* Valid small run page multiples. */
 static size_t	*run_quantize_floor_tab; /* run_quantize_floor() memoization. */
 static size_t	*run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
 unsigned	nlclasses; /* Number of large size classes. */
 unsigned	nhclasses; /* Number of huge size classes. */
-static szind_t	runs_avail_bias; /* Size index for first runs_avail tree. */
-static szind_t	runs_avail_nclasses; /* Number of runs_avail trees. */
 
 /******************************************************************************/
 /*
@@ -177,7 +174,7 @@ run_quantize_floor(size_t size)
 	size_t ret;
 
 	assert(size > 0);
-	assert(size <= run_quantize_max);
+	assert(size <= HUGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
@@ -200,7 +197,7 @@ run_quantize_ceil(size_t size)
 	size_t ret;
 
 	assert(size > 0);
-	assert(size <= run_quantize_max);
+	assert(size <= HUGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
@@ -213,25 +210,15 @@ run_quantize_ceil(size_t size)
 run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
 #endif
 
-static arena_run_heap_t *
-arena_runs_avail_get(arena_t *arena, szind_t ind)
-{
-
-	assert(ind >= runs_avail_bias);
-	assert(ind - runs_avail_bias < runs_avail_nclasses);
-
-	return (&arena->runs_avail[ind - runs_avail_bias]);
-}
-
 static void
 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
-	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
+	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
 	    arena_miscelm_get_const(chunk, pageind))));
 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
 	    LG_PAGE));
-	arena_run_heap_insert(arena_runs_avail_get(arena, ind),
+	arena_run_heap_insert(&arena->runs_avail[pind],
 	    arena_miscelm_get_mutable(chunk, pageind));
 }
 
@@ -239,11 +226,11 @@ static void
 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
     size_t npages)
 {
-	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
+	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
 	    arena_miscelm_get_const(chunk, pageind))));
 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
 	    LG_PAGE));
-	arena_run_heap_remove(arena_runs_avail_get(arena, ind),
+	arena_run_heap_remove(&arena->runs_avail[pind],
 	    arena_miscelm_get_mutable(chunk, pageind));
 }
 
@@ -1088,12 +1075,13 @@ arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
 static arena_run_t *
 arena_run_first_best_fit(arena_t *arena, size_t size)
 {
-	szind_t ind, i;
+	pszind_t pind, i;
 
-	ind = size2index(run_quantize_ceil(size));
-	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
+	pind = psz2ind(run_quantize_ceil(size));
+
+	for (i = pind; pind2sz(i) <= large_maxclass; i++) {
 		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
-		    arena_runs_avail_get(arena, i));
+		    &arena->runs_avail[i]);
 		if (miscelm != NULL)
 			return (&miscelm->run);
 	}
@@ -1946,7 +1934,8 @@ arena_reset(tsd_t *tsd, arena_t *arena)
 	assert(!arena->purging);
 	arena->nactive = 0;
 
-	for(i = 0; i < runs_avail_nclasses; i++)
+	for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
+	    i++)
 		arena_run_heap_new(&arena->runs_avail[i]);
 
 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
@@ -3388,23 +3377,19 @@ arena_t *
 arena_new(tsdn_t *tsdn, unsigned ind)
 {
 	arena_t *arena;
-	size_t arena_size;
 	unsigned i;
 
-	/* Compute arena size to incorporate sufficient runs_avail elements. */
-	arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
-	    runs_avail_nclasses);
 	/*
 	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
 	 * because there is no way to clean up if base_alloc() OOMs.
 	 */
 	if (config_stats) {
 		arena = (arena_t *)base_alloc(tsdn,
-		    CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
-		    sizeof(malloc_large_stats_t) + nhclasses) *
-		    sizeof(malloc_huge_stats_t));
+		    CACHELINE_CEILING(sizeof(arena_t)) +
+		    QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)) +
+		    (nhclasses * sizeof(malloc_huge_stats_t))));
 	} else
-		arena = (arena_t *)base_alloc(tsdn, arena_size);
+		arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
 	if (arena == NULL)
 		return (NULL);
 
@@ -3416,11 +3401,11 @@ arena_new(tsdn_t *tsdn, unsigned ind)
 	if (config_stats) {
 		memset(&arena->stats, 0, sizeof(arena_stats_t));
 		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
-		    + CACHELINE_CEILING(arena_size));
+		    + CACHELINE_CEILING(sizeof(arena_t)));
 		memset(arena->stats.lstats, 0, nlclasses *
 		    sizeof(malloc_large_stats_t));
 		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
-		    + CACHELINE_CEILING(arena_size) +
+		    + CACHELINE_CEILING(sizeof(arena_t)) +
 		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
 		memset(arena->stats.hstats, 0, nhclasses *
 		    sizeof(malloc_huge_stats_t));
|
|||||||
arena->nactive = 0;
|
arena->nactive = 0;
|
||||||
arena->ndirty = 0;
|
arena->ndirty = 0;
|
||||||
|
|
||||||
for(i = 0; i < runs_avail_nclasses; i++)
|
for (i = 0; i < sizeof(arena->runs_avail) / sizeof(arena_run_heap_t);
|
||||||
|
i++)
|
||||||
arena_run_heap_new(&arena->runs_avail[i]);
|
arena_run_heap_new(&arena->runs_avail[i]);
|
||||||
|
|
||||||
qr_new(&arena->runs_dirty, rd_link);
|
qr_new(&arena->runs_dirty, rd_link);
|
||||||
qr_new(&arena->chunks_cache, cc_link);
|
qr_new(&arena->chunks_cache, cc_link);
|
||||||
|
|
||||||
@@ -3526,6 +3513,7 @@ small_run_size_init(void)
 static bool
 run_quantize_init(void)
 {
+	size_t run_quantize_max;
 	unsigned i;
 
 	run_quantize_max = chunksize + large_pad;
@@ -3604,9 +3592,6 @@ arena_boot(void)
 	if (run_quantize_init())
 		return (true);
 
-	runs_avail_bias = size2index(PAGE);
-	runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias;
-
 	return (false);
 }
 
@@ -78,14 +78,25 @@ enum {
 };
 static uint8_t	malloc_slow_flags;
 
-/* Last entry for overflow detection only. */
 JEMALLOC_ALIGNED(CACHELINE)
-const size_t	index2size_tab[NSIZES+1] = {
+const size_t	pind2sz_tab[NPSIZES] = {
+#define	PSZ_yes(lg_grp, ndelta, lg_delta) \
+	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
+#define	PSZ_no(lg_grp, ndelta, lg_delta)
+#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
+	PSZ_##psz(lg_grp, ndelta, lg_delta)
+	SIZE_CLASSES
+#undef PSZ_yes
+#undef PSZ_no
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t	index2size_tab[NSIZES] = {
 #define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \
 	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
 	SIZE_CLASSES
 #undef SC
-	ZU(0)
 };
 
 JEMALLOC_ALIGNED(CACHELINE)
@@ -111,7 +111,7 @@ TEST_BEGIN(test_monotonic)
 
 	floor_prev = 0;
 	ceil_prev = 0;
-	for (i = 1; i < run_quantize_max >> LG_PAGE; i++) {
+	for (i = 1; i <= large_maxclass >> LG_PAGE; i++) {
 		size_t run_size, floor, ceil;
 
 		run_size = i << LG_PAGE;