Fix lg_chunk clamping for config_cache_oblivious.
Fix lg_chunk clamping to take into account cache-oblivious large allocation. This regression only resulted in incorrect behavior if !config_fill (false unless --disable-fill specified) and config_cache_oblivious (true unless --disable-cache-oblivious specified).

This regression was introduced by 8a03cf039cd06f9fa6972711195055d865673966 (Implement cache index randomization for large allocations.), which was first released in 4.0.0.

This resolves #555.
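For reference, here is a minimal sketch (not jemalloc source) of the corrected lower bound, assuming the typical LG_PAGE=12 and LG_SIZE_CLASS_GROUP=2. A chunk needs one header page, up to 2^(LG_SIZE_CLASS_GROUP+1) data pages, and one extra page whenever cache-oblivious large allocation or junk/redzone fill pads a run; the pre-fix bound charged the extra page only to config_fill, so a --disable-fill build with cache index randomization enabled could accept an lg_chunk one smaller than is actually safe.

#include <stdio.h>

/* Stand-ins for jemalloc's compile-time constants (typical values). */
#define LG_PAGE			12	/* 4 KiB pages */
#define LG_SIZE_CLASS_GROUP	2	/* 4 size classes per doubling */

/* Post-fix lower bound on lg_chunk: header page + data pages + one
 * optional pad page. */
static unsigned
min_lg_chunk(int cache_oblivious, int fill)
{

	return (LG_PAGE + LG_SIZE_CLASS_GROUP + 1 +
	    ((cache_oblivious || fill) ? 1 : 0));
}

int
main(void)
{

	/* The pre-fix bound, LG_PAGE + LG_SIZE_CLASS_GROUP +
	 * (config_fill ? 2 : 1), evaluates to 15 when fill is disabled,
	 * even though cache-oblivious padding requires 16. */
	printf("cache-oblivious, no fill: %u\n", min_lg_chunk(1, 0));
	printf("neither:                  %u\n", min_lg_chunk(0, 0));
	return (0);
}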
parent 1027a2682b
commit 7c124830a1

src/arena.c (10 lines changed)
@@ -2710,6 +2710,7 @@ arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
 		return (arena_malloc_small(tsdn, arena, ind, zero));
 	if (likely(size <= large_maxclass))
 		return (arena_malloc_large(tsdn, arena, ind, zero));
+	assert(index2size(ind) >= chunksize);
 	return (huge_malloc(tsdn, arena, index2size(ind), zero));
 }
 
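The assertion added above makes the fall-through invariant explicit: with the arena_boot() clamping removed (next hunk), large_maxclass is always the largest size class strictly below chunksize, so any request that bypasses arena_malloc_large() maps to a class of at least chunksize and belongs in huge_malloc(). A toy model of that invariant (not jemalloc source; it assumes 64 KiB chunks and ignores the small/tiny classes):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define LG_SIZE_CLASS_GROUP	2

/* Round sz up to a jemalloc-style size class: within the doubling
 * (2^e, 2^(e+1)], classes are spaced 2^(e - LG_SIZE_CLASS_GROUP)
 * apart. */
static size_t
size_class_ceil(size_t sz)
{
	int e = 63 - __builtin_clzll((unsigned long long)(sz - 1));
	size_t delta = (size_t)1 << (e - LG_SIZE_CLASS_GROUP);

	return ((sz + delta - 1) & ~(delta - 1));
}

int
main(void)
{
	size_t chunksize = (size_t)1 << 16;	/* 64 KiB chunks */
	/* Largest class strictly below chunksize. */
	size_t large_maxclass = chunksize -
	    (chunksize >> (LG_SIZE_CLASS_GROUP + 1));
	size_t sz;

	/* Everything past large_maxclass rounds up to >= chunksize,
	 * which is what assert(index2size(ind) >= chunksize) encodes. */
	for (sz = large_maxclass + 1; sz <= 2 * chunksize; sz++)
		assert(size_class_ceil(sz) >= chunksize);
	printf("large_maxclass=%zu, next class up=%zu\n", large_maxclass,
	    size_class_ceil(large_maxclass + 1));
	return (0);
}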
@@ -3806,15 +3807,8 @@ arena_boot(void)
 	arena_maxrun = chunksize - (map_bias << LG_PAGE);
 	assert(arena_maxrun > 0);
 	large_maxclass = index2size(size2index(chunksize)-1);
-	if (large_maxclass > arena_maxrun) {
-		/*
-		 * For small chunk sizes it's possible for there to be fewer
-		 * non-header pages available than are necessary to serve the
-		 * size classes just below chunksize.
-		 */
-		large_maxclass = arena_maxrun;
-	}
 	assert(large_maxclass > 0);
+	assert(large_maxclass + large_pad <= arena_maxrun);
 	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
 	nhclasses = NSIZES - nlclasses - NBINS;
 
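With the option bound corrected (next file), the clamping removed here is unreachable, so it is replaced by an assertion that the largest large class plus its pad still fits in a run. A hedged worked check of that arithmetic, assuming 4 KiB pages, a single header page (map_bias is actually computed iteratively in arena_boot()), and a one-page large_pad under config_cache_oblivious:

#include <stdio.h>

#define LG_PAGE			12
#define LG_SIZE_CLASS_GROUP	2

static void
check(unsigned lg_chunk)
{
	size_t page = (size_t)1 << LG_PAGE;
	size_t chunksize = (size_t)1 << lg_chunk;
	size_t arena_maxrun = chunksize - page;	/* one header page assumed */
	size_t large_maxclass = chunksize -
	    (chunksize >> (LG_SIZE_CLASS_GROUP + 1));
	size_t large_pad = page;	/* config_cache_oblivious */

	printf("lg_chunk=%u: large_maxclass+large_pad=%zu, arena_maxrun=%zu"
	    " -> %s\n", lg_chunk, large_maxclass + large_pad, arena_maxrun,
	    (large_maxclass + large_pad <= arena_maxrun) ? "OK" : "overflow");
}

int
main(void)
{

	check(16);	/* post-fix minimum: 61440 <= 61440, exactly tight */
	check(15);	/* permitted pre-fix when !config_fill: 32768 > 28672 */
	return (0);
}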
src/jemalloc.c

@@ -1136,16 +1136,18 @@ malloc_conf_init(void)
 
 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
 			/*
-			 * Chunks always require at least one header page,
-			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
-			 * possibly an additional page in the presence of
-			 * redzones.  In order to simplify options processing,
-			 * use a conservative bound that accommodates all these
-			 * constraints.
+			 * Chunks always require at least one header page, as
+			 * many as 2^(LG_SIZE_CLASS_GROUP+1) data pages (plus an
+			 * additional page in the presence of cache-oblivious
+			 * large), and possibly an additional page in the
+			 * presence of redzones.  In order to simplify options
+			 * processing, use a conservative bound that
+			 * accommodates all these constraints.
 			 */
 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
-			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
-			    (sizeof(size_t) << 3) - 1, yes, yes, true)
+			    LG_SIZE_CLASS_GROUP + 1 + (config_cache_oblivious ||
+			    config_fill ? 1 : 0), (sizeof(size_t) << 3) - 1,
+			    yes, yes, true)
 			if (strncmp("dss", k, klen) == 0) {
 				int i;
 				bool match = false;
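Because the option is registered with clipping (the final true argument to CONF_HANDLE_SIZE_T), an lg_chunk below the corrected minimum should be clamped up to the bound rather than rejected. A hypothetical smoke test in the spirit of #555 (not from the jemalloc test suite): link against jemalloc and run with a deliberately small chunk size, e.g. cc test.c -ljemalloc && MALLOC_CONF=lg_chunk:14 ./a.out.

#include <stdlib.h>
#include <string.h>

int
main(void)
{
	size_t sz;

	/* Walk sizes around the large/huge boundary for the clamped
	 * minimum chunk size, touching every page so that any run
	 * overflow caused by cache-oblivious padding would surface. */
	for (sz = 32 * 1024; sz <= 128 * 1024; sz += 4096) {
		void *p = malloc(sz);

		if (p == NULL)
			return (1);
		memset(p, 0, sz);
		free(p);
	}
	return (0);
}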