Move slabs out of chunks.

Jason Evans
2016-05-29 18:34:50 -07:00
parent d28e5a6696
commit 498856f44a
21 changed files with 596 additions and 2332 deletions
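Two renames run through the hunks below: extent_init() loses its dirty parameter, and the bin statistics vocabulary switches from runs to slabs (run_size/nruns/nreruns/curruns become slab_size/nslabs/nreslabs/curslabs). A sketch of the extent_init() signature change as implied by the call sites (parameter names are assumed, not copied from the header):

/* Before: per-extent dirty flag passed at initialization. */
void	extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    size_t usize, bool active, bool dirty, bool zeroed, bool committed,
    bool slab);

/* After: the dirty argument is dropped at every call site below. */
void	extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
    size_t usize, bool active, bool zeroed, bool committed, bool slab);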

File diff suppressed because it is too large.

src/base.c

@@ -74,8 +74,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
 			base_resident += PAGE_CEILING(nsize);
 		}
 	}
-	extent_init(extent, NULL, addr, csize, 0, true, false, true, true,
-	    false);
+	extent_init(extent, NULL, addr, csize, 0, true, true, true, false);
 	return (extent);
 }

src/chunk.c

@@ -558,8 +558,7 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
 		extent_dalloc(tsdn, arena, extent);
 		return (NULL);
 	}
-	extent_init(extent, arena, addr, size, usize, true, false, zero, commit,
-	    slab);
+	extent_init(extent, arena, addr, size, usize, true, zero, commit, slab);
 	if (pad != 0)
 		extent_addr_randomize(tsdn, extent, alignment);
 	if (chunk_register(tsdn, extent)) {
@@ -828,8 +827,8 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	extent_init(&lead, arena, extent_addr_get(extent), size_a,
 	    usize_a, extent_active_get(extent),
-	    extent_dirty_get(extent), extent_zeroed_get(extent),
-	    extent_committed_get(extent), extent_slab_get(extent));
+	    extent_zeroed_get(extent), extent_committed_get(extent),
+	    extent_slab_get(extent));
 
 	if (extent_rtree_acquire(tsdn, &lead, false, true, &lead_elm_a,
 	    &lead_elm_b))
@@ -838,8 +837,8 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
 	    size_a), size_b, usize_b, extent_active_get(extent),
-	    extent_dirty_get(extent), extent_zeroed_get(extent),
-	    extent_committed_get(extent), extent_slab_get(extent));
+	    extent_zeroed_get(extent), extent_committed_get(extent),
+	    extent_slab_get(extent));
 
 	if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
 	    &trail_elm_b))
 		goto label_error_c;

src/chunk_dss.c

@@ -121,7 +121,7 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
 			pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
 			if (pad_size != 0) {
 				extent_init(pad, arena, pad_addr, pad_size,
-				    pad_size, false, true, false, true, false);
+				    pad_size, false, false, true, false);
 			}
 			dss_next = (void *)((uintptr_t)ret + size);
 			if ((uintptr_t)ret < (uintptr_t)dss_max ||

src/ctl.c

@@ -124,7 +124,7 @@ CTL_PROTO(arena_i_chunk_hooks)
 INDEX_PROTO(arena_i)
 CTL_PROTO(arenas_bin_i_size)
 CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_run_size)
+CTL_PROTO(arenas_bin_i_slab_size)
 INDEX_PROTO(arenas_bin_i)
 CTL_PROTO(arenas_hchunk_i_size)
 INDEX_PROTO(arenas_hchunk_i)
@@ -160,9 +160,9 @@ CTL_PROTO(stats_arenas_i_bins_j_nrequests)
 CTL_PROTO(stats_arenas_i_bins_j_curregs)
 CTL_PROTO(stats_arenas_i_bins_j_nfills)
 CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nruns)
-CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_curruns)
+CTL_PROTO(stats_arenas_i_bins_j_nslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
+CTL_PROTO(stats_arenas_i_bins_j_curslabs)
 INDEX_PROTO(stats_arenas_i_bins_j)
 CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
 CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
@@ -300,7 +300,7 @@ static const ctl_indexed_node_t arena_node[] = {
 static const ctl_named_node_t arenas_bin_i_node[] = {
 	{NAME("size"),		CTL(arenas_bin_i_size)},
 	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
-	{NAME("run_size"),	CTL(arenas_bin_i_run_size)}
+	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)}
 };
 static const ctl_named_node_t super_arenas_bin_i_node[] = {
 	{NAME(""),		CHILD(named, arenas_bin_i)}
@@ -373,9 +373,9 @@ static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
 	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
 	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
 	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
-	{NAME("nruns"),		CTL(stats_arenas_i_bins_j_nruns)},
-	{NAME("nreruns"),	CTL(stats_arenas_i_bins_j_nreruns)},
-	{NAME("curruns"),	CTL(stats_arenas_i_bins_j_curruns)}
+	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
+	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
+	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)}
 };
 static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
 	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
@@ -549,9 +549,10 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
 			sstats->bstats[i].nflushes +=
 			    astats->bstats[i].nflushes;
 		}
-		sstats->bstats[i].nruns += astats->bstats[i].nruns;
-		sstats->bstats[i].reruns += astats->bstats[i].reruns;
-		sstats->bstats[i].curruns += astats->bstats[i].curruns;
+		sstats->bstats[i].nslabs += astats->bstats[i].nslabs;
+		sstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+		sstats->bstats[i].curslabs +=
+		    astats->bstats[i].curslabs;
 	}
 
 	for (i = 0; i < NSIZES - NBINS; i++) {
@@ -1801,7 +1802,7 @@ CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
 CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
 CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
 CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
 
 static const ctl_named_node_t *
 arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
 {
@@ -2032,12 +2033,12 @@ CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
 CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
     ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
-    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
-    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
-    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].nslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].reslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
+    ctl_stats.arenas[mib[2]].bstats[mib[4]].curslabs, size_t)
 
 static const ctl_named_node_t *
 stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
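For mallctl consumers, the renames above are API-visible. A minimal sketch of reading the renamed controls (assuming a stats-enabled build; arena and bin indices hardwired to 0 for brevity):

#include <stdio.h>
#include <inttypes.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	uint64_t nslabs;
	size_t slab_size, curslabs, sz;

	/* Refresh jemalloc's cached stats snapshot before reading. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));

	sz = sizeof(slab_size);		/* formerly arenas.bin.0.run_size */
	mallctl("arenas.bin.0.slab_size", &slab_size, &sz, NULL, 0);

	sz = sizeof(nslabs);		/* formerly ...bins.0.nruns */
	mallctl("stats.arenas.0.bins.0.nslabs", &nslabs, &sz, NULL, 0);

	sz = sizeof(curslabs);		/* formerly ...bins.0.curruns */
	mallctl("stats.arenas.0.bins.0.curslabs", &curslabs, &sz, NULL, 0);

	printf("bin 0: slab_size=%zu nslabs=%"PRIu64" curslabs=%zu\n",
	    slab_size, nslabs, curslabs);
	return (0);
}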

src/huge.c

@@ -153,8 +153,8 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 			 * Zero the trailing bytes of the original allocation's
 			 * last page, since they are in an indeterminate state.
 			 * There will always be trailing bytes, because ptr's
-			 * offset from the beginning of the run is a multiple of
-			 * CACHELINE in [0 .. PAGE).
+			 * offset from the beginning of the extent is a multiple
+			 * of CACHELINE in [0 .. PAGE).
 			 */
 			void *zbase = (void *)
 			    ((uintptr_t)extent_addr_get(extent) + oldusize);
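The comment above explains why the tail of the old allocation's last page must be zeroed on in-place expansion. A standalone sketch of the computation under that reasoning (hypothetical helper; jemalloc's own code applies its PAGE macros to zbase, and the page size is determined at configure time):

#include <stdint.h>
#include <string.h>

#define PAGE	((uintptr_t)4096)	/* assumed; jemalloc derives this */

/*
 * Zero [zbase, zpast): from the end of the old usable size up to the
 * next page boundary.  The range is never empty; when zbase happens to
 * be page-aligned, a full page is zeroed.
 */
static void
zero_trailing(void *addr, size_t oldusize)
{
	uintptr_t zbase = (uintptr_t)addr + oldusize;
	uintptr_t zpast = (zbase + PAGE) & ~(PAGE - 1);

	memset((void *)zbase, 0, (size_t)(zpast - zbase));
}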

src/jemalloc.c

@@ -1707,28 +1707,30 @@ irealloc_prof_sample(tsd_t *tsd, extent_t *extent, void *old_ptr,
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
+irealloc_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
     size_t usize)
 {
 	void *p;
-	extent_t *e;
+	extent_t *extent;
 	bool prof_active;
 	prof_tctx_t *old_tctx, *tctx;
 
 	prof_active = prof_active_get_unlocked();
-	old_tctx = prof_tctx_get(tsd_tsdn(tsd), extent, old_ptr);
+	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_extent, old_ptr);
 	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		p = irealloc_prof_sample(tsd, extent, old_ptr, old_usize, usize,
-		    tctx);
-	} else
-		p = iralloc(tsd, extent, old_ptr, old_usize, usize, 0, false);
+		p = irealloc_prof_sample(tsd, old_extent, old_ptr, old_usize,
+		    usize, tctx);
+	} else {
+		p = iralloc(tsd, old_extent, old_ptr, old_usize, usize, 0,
+		    false);
+	}
 	if (unlikely(p == NULL)) {
 		prof_alloc_rollback(tsd, tctx, true);
 		return (NULL);
 	}
-	e = (p == old_ptr) ? extent : iealloc(tsd_tsdn(tsd), p);
-	prof_realloc(tsd, e, p, usize, tctx, prof_active, true,
+	extent = (p == old_ptr) ? old_extent : iealloc(tsd_tsdn(tsd), p);
+	prof_realloc(tsd, extent, p, usize, tctx, prof_active, true, old_extent,
 	    old_ptr, old_usize, old_tctx);
 
 	return (p);
@@ -2146,24 +2148,24 @@ irallocx_prof_sample(tsdn_t *tsdn, extent_t *extent, void *old_ptr,
 }
 
 JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
+irallocx_prof(tsd_t *tsd, extent_t *old_extent, void *old_ptr, size_t old_usize,
     size_t size, size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
     arena_t *arena)
 {
 	void *p;
-	extent_t *e;
+	extent_t *extent;
 	bool prof_active;
 	prof_tctx_t *old_tctx, *tctx;
 
 	prof_active = prof_active_get_unlocked();
-	old_tctx = prof_tctx_get(tsd_tsdn(tsd), extent, old_ptr);
+	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_extent, old_ptr);
 	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
-		p = irallocx_prof_sample(tsd_tsdn(tsd), extent, old_ptr,
+		p = irallocx_prof_sample(tsd_tsdn(tsd), old_extent, old_ptr,
 		    old_usize, *usize, alignment, zero, tcache, arena, tctx);
 	} else {
-		p = iralloct(tsd_tsdn(tsd), extent, old_ptr, old_usize, size,
-		    alignment, zero, tcache, arena);
+		p = iralloct(tsd_tsdn(tsd), old_extent, old_ptr, old_usize,
+		    size, alignment, zero, tcache, arena);
 	}
 	if (unlikely(p == NULL)) {
 		prof_alloc_rollback(tsd, tctx, true);
@@ -2179,12 +2181,12 @@ irallocx_prof(tsd_t *tsd, extent_t *extent, void *old_ptr, size_t old_usize,
 		 * be the same as the current usize because of in-place large
 		 * reallocation.  Therefore, query the actual value of usize.
 		 */
-		e = extent;
-		*usize = isalloc(tsd_tsdn(tsd), e, p);
+		extent = old_extent;
+		*usize = isalloc(tsd_tsdn(tsd), extent, p);
 	} else
-		e = iealloc(tsd_tsdn(tsd), p);
-	prof_realloc(tsd, e, p, *usize, tctx, prof_active, true, old_ptr,
-	    old_usize, old_tctx);
+		extent = iealloc(tsd_tsdn(tsd), p);
+	prof_realloc(tsd, extent, p, *usize, tctx, prof_active, true,
+	    old_extent, old_ptr, old_usize, old_tctx);
 
 	return (p);
 }
@@ -2338,8 +2340,8 @@ ixallocx_prof(tsd_t *tsd, extent_t *extent, void *ptr, size_t old_usize,
 		prof_alloc_rollback(tsd, tctx, false);
 		return (usize);
 	}
-	prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, ptr,
-	    old_usize, old_tctx);
+	prof_realloc(tsd, extent, ptr, usize, tctx, prof_active, false, extent,
+	    ptr, old_usize, old_tctx);
 
 	return (usize);
 }
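Alongside the extent/old_extent renames, the call sites show prof_realloc() gaining an extent argument for the old allocation just before old_ptr. A sketch of the implied new declaration (parameter names and const-qualifiers are assumptions inferred from the surrounding code, not copied from the prof header):

void	prof_realloc(tsd_t *tsd, extent_t *extent, const void *ptr,
    size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated,
    extent_t *old_extent, const void *old_ptr, size_t old_usize,
    prof_tctx_t *old_tctx);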

src/stats.c

@@ -58,29 +58,29 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 	if (config_tcache) {
 		malloc_cprintf(write_cb, cbopaque,
 		    "bins: size ind allocated nmalloc"
-		    " ndalloc nrequests curregs curruns regs"
-		    " pgs util nfills nflushes newruns"
-		    " reruns\n");
+		    " ndalloc nrequests curregs curslabs regs"
+		    " pgs util nfills nflushes newslabs"
+		    " reslabs\n");
 	} else {
 		malloc_cprintf(write_cb, cbopaque,
 		    "bins: size ind allocated nmalloc"
-		    " ndalloc nrequests curregs curruns regs"
-		    " pgs util newruns reruns\n");
+		    " ndalloc nrequests curregs curslabs regs"
+		    " pgs util newslabs reslabs\n");
 	}
 	CTL_GET("arenas.nbins", &nbins, unsigned);
 	for (j = 0, in_gap = false; j < nbins; j++) {
-		uint64_t nruns;
+		uint64_t nslabs;
 
-		CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+		CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
 		    uint64_t);
-		if (nruns == 0)
+		if (nslabs == 0)
 			in_gap = true;
 		else {
-			size_t reg_size, run_size, curregs, availregs, milli;
-			size_t curruns;
+			size_t reg_size, slab_size, curregs, availregs, milli;
+			size_t curslabs;
 			uint32_t nregs;
 			uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
-			uint64_t reruns;
+			uint64_t reslabs;
 			char util[6]; /* "x.yyy". */
 
 			if (in_gap) {
@@ -90,7 +90,7 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 			}
 			CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
 			CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
-			CTL_M2_GET("arenas.bin.0.run_size", j, &run_size,
+			CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size,
 			    size_t);
 			CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j,
 			    &nmalloc, uint64_t);
@@ -106,12 +106,12 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes",
 				    i, j, &nflushes, uint64_t);
 			}
-			CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j,
-			    &reruns, uint64_t);
-			CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j,
-			    &curruns, size_t);
+			CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j,
+			    &reslabs, uint64_t);
+			CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j,
+			    &curslabs, size_t);
 
-			availregs = nregs * curruns;
+			availregs = nregs * curslabs;
 			milli = (availregs != 0) ? (1000 * curregs) / availregs
 			    : 1000;
 			assert(milli <= 1000);
@@ -134,9 +134,9 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				    " %12zu %4u %3zu %-5s %12"FMTu64
 				    " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
 				    reg_size, j, curregs * reg_size, nmalloc,
-				    ndalloc, nrequests, curregs, curruns, nregs,
-				    run_size / page, util, nfills, nflushes,
-				    nruns, reruns);
+				    ndalloc, nrequests, curregs, curslabs,
+				    nregs, slab_size / page, util, nfills,
+				    nflushes, nslabs, reslabs);
 			} else {
 				malloc_cprintf(write_cb, cbopaque,
 				    "%20zu %3u %12zu %12"FMTu64
@@ -144,8 +144,9 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
 				    " %12zu %4u %3zu %-5s %12"FMTu64
 				    " %12"FMTu64"\n",
 				    reg_size, j, curregs * reg_size, nmalloc,
-				    ndalloc, nrequests, curregs, curruns, nregs,
-				    run_size / page, util, nruns, reruns);
+				    ndalloc, nrequests, curregs, curslabs,
+				    nregs, slab_size / page, util, nslabs,
+				    reslabs);
 			}
 		}
 	}
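The utilization column computed above carries over unchanged: with curregs regions live out of nregs per slab times curslabs slabs, util is curregs / (nregs * curslabs), printed in thousandths as "x.yyy". A condensed sketch of that formatting (the in-tree code uses malloc_snprintf with several width cases; this is an equivalent, not the verbatim logic):

#include <stdio.h>
#include <stdint.h>

/* Format bin utilization as "x.yyy", e.g. 0.947; "1" when fully used. */
static void
bin_util(char util[6], size_t curregs, uint32_t nregs, size_t curslabs)
{
	size_t availregs = nregs * curslabs;
	size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

	if (milli == 1000)
		snprintf(util, 6, "1");
	else
		snprintf(util, 6, "0.%03zu", milli);
}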

src/tcache.c

@@ -127,14 +127,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		extent = iealloc(tsd_tsdn(tsd), ptr);
 		if (extent_arena_get(extent) == bin_arena) {
-			arena_chunk_t *chunk =
-			    (arena_chunk_t *)extent_base_get(extent);
-			size_t pageind = ((uintptr_t)ptr -
-			    (uintptr_t)chunk) >> LG_PAGE;
-			arena_chunk_map_bits_t *bitselm =
-			    arena_bitselm_get_mutable(chunk, pageind);
 			arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-			    bin_arena, chunk, extent, ptr, bitselm);
+			    bin_arena, extent, ptr);
 		} else {
 			/*
 			 * This object was allocated via a different