diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index fac6fd34..c298df92 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -486,8 +486,6 @@ void arena_chunk_cache_maybe_remove(arena_t *arena, extent_t *extent,
 extent_t *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool *zero);
 void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
-    extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
     extent_t *extent, size_t oldsize);
 void arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index c3bdacb4..919b74f6 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -90,6 +90,8 @@ ph_proto(, extent_heap_, extent_heap_t, extent_t)
 arena_t *extent_arena_get(const extent_t *extent);
 void *extent_addr_get(const extent_t *extent);
 size_t extent_size_get(const extent_t *extent);
+void *extent_before_get(const extent_t *extent);
+void *extent_last_get(const extent_t *extent);
 void *extent_past_get(const extent_t *extent);
 bool extent_active_get(const extent_t *extent);
 bool extent_dirty_get(const extent_t *extent);
@@ -137,6 +139,20 @@ extent_size_get(const extent_t *extent)
     return (extent->e_size);
 }
 
+JEMALLOC_INLINE void *
+extent_before_get(const extent_t *extent)
+{
+
+    return ((void *)((uintptr_t)extent->e_addr - PAGE));
+}
+
+JEMALLOC_INLINE void *
+extent_last_get(const extent_t *extent)
+{
+
+    return ((void *)((uintptr_t)extent->e_addr + extent->e_size - PAGE));
+}
+
 JEMALLOC_INLINE void *
 extent_past_get(const extent_t *extent)
 {
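
For orientation, the two new accessors mirror extent_past_get(): extent_before_get() names the start of the page immediately preceding an extent, and extent_last_get() names the start of its final page. The coalescing and rtree code later in this patch keys off exactly these page-granular neighbor addresses. A minimal standalone sketch of the arithmetic, assuming a 4 KiB page and a stand-in struct rather than jemalloc's real extent_t:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)  /* assumed page size */

typedef struct {
    void   *e_addr; /* extent base address */
    size_t  e_size; /* extent size (a multiple of PAGE) */
} fake_extent_t;

static void *
fake_before_get(const fake_extent_t *e)
{
    return ((void *)((uintptr_t)e->e_addr - PAGE));
}

static void *
fake_last_get(const fake_extent_t *e)
{
    return ((void *)((uintptr_t)e->e_addr + e->e_size - PAGE));
}

static void *
fake_past_get(const fake_extent_t *e)
{
    return ((void *)((uintptr_t)e->e_addr + e->e_size));
}

int
main(void)
{
    fake_extent_t e = {(void *)(uintptr_t)0x10000, 3 * PAGE};

    /* before < addr <= last < past, one page apart at each end. */
    assert(fake_before_get(&e) == (void *)(uintptr_t)(0x10000 - PAGE));
    assert(fake_last_get(&e) == (void *)(uintptr_t)(0x10000 + 2 * PAGE));
    assert(fake_past_get(&e) == (void *)(uintptr_t)(0x10000 + 3 * PAGE));
    printf("page-granular neighbors OK\n");
    return (0);
}
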
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 7afe5694..ef4e0522 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -797,14 +797,14 @@ sa2u(size_t size, size_t alignment)
         return (usize);
     }
 
-    /* Try for a large size class. */
-    if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
-        /*
-         * We can't achieve subpage alignment, so round up alignment
-         * to the minimum that can actually be supported.
-         */
-        alignment = PAGE_CEILING(alignment);
+    /*
+     * We can't achieve subpage alignment, so round up alignment to the
+     * minimum that can actually be supported.
+     */
+    alignment = PAGE_CEILING(alignment);
 
+    /* Try for a large size class. */
+    if (likely(size <= large_maxclass) && likely(alignment == PAGE)) {
         /* Make sure result is a large size class. */
         usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
@@ -821,12 +821,6 @@ sa2u(size_t size, size_t alignment)
     if (unlikely(alignment > HUGE_MAXCLASS))
         return (0);
 
-    /*
-     * We can't achieve subchunk alignment, so round up alignment to the
-     * minimum that can actually be supported.
-     */
-    alignment = CHUNK_CEILING(alignment);
-
     /* Make sure result is a huge size class. */
     if (size <= chunksize)
         usize = chunksize;
@@ -839,7 +833,7 @@ sa2u(size_t size, size_t alignment)
     }
 
     /*
-     * Calculate the multi-chunk mapping that huge_palloc() would need in
+     * Calculate the multi-page mapping that huge_palloc() would need in
      * order to guarantee the alignment.
      */
     if (usize + alignment < usize) {
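
The reworked sa2u() now promotes any sub-page alignment to PAGE before choosing a path, takes the large-size-class path only when no more than page alignment is requested, and reserves alignment - PAGE extra bytes for huge requests instead of chunk-rounding. A rough model of that control flow, with made-up stand-ins for s2u(), PAGE_CEILING(), and the class limits (illustrative only, not the real size-class machinery):

#include <stddef.h>
#include <stdio.h>

#define PAGE            ((size_t)4096)      /* assumed page size */
#define LARGE_MAXCLASS  ((size_t)(2 << 20)) /* made-up threshold */

/* Round x up to a power-of-two multiple (stand-in for PAGE_CEILING()). */
static size_t
ceil_to(size_t x, size_t align)
{
    return ((x + align - 1) & ~(align - 1));
}

/* Crude stand-in for s2u(): round up to a page multiple. */
static size_t
fake_s2u(size_t size)
{
    return (ceil_to(size, PAGE));
}

/* Model of the reordered alignment handling in sa2u(). */
static size_t
model_sa2u(size_t size, size_t alignment)
{
    /* Sub-page alignment cannot be provided; round it up to PAGE. */
    alignment = ceil_to(alignment, PAGE);

    if (size <= LARGE_MAXCLASS && alignment == PAGE) {
        /* Large path: page alignment is free, only the size matters. */
        return (fake_s2u(size));
    }
    /* Huge path: reserve enough to carve out an aligned region. */
    return (fake_s2u(size) + alignment - PAGE);
}

int
main(void)
{
    printf("%zu\n", model_sa2u(5000, 64));      /* 8192: page-aligned large */
    printf("%zu\n", model_sa2u(5000, 1 << 16)); /* needs extra for alignment */
    return (0);
}
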
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index c4466013..44a2cd31 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -19,7 +19,6 @@ arena_chunk_cache_maybe_remove
 arena_chunk_dalloc_huge
 arena_chunk_ralloc_huge_expand
 arena_chunk_ralloc_huge_shrink
-arena_chunk_ralloc_huge_similar
 arena_cleanup
 arena_dalloc
 arena_dalloc_bin
@@ -211,6 +210,7 @@ extent_addr_set
 extent_alloc
 extent_arena_get
 extent_arena_set
+extent_before_get
 extent_committed_get
 extent_committed_set
 extent_dalloc
@@ -219,6 +219,7 @@ extent_dirty_insert
 extent_dirty_remove
 extent_dirty_set
 extent_init
+extent_last_get
 extent_past_get
 extent_prof_tctx_get
 extent_prof_tctx_set
diff --git a/src/arena.c b/src/arena.c
index 39764c3f..aff11961 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -653,7 +653,7 @@ arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
     malloc_mutex_unlock(tsdn, &arena->lock);
 
     extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, chunksize,
-        chunksize, zero, commit, true);
+        PAGE, zero, commit, true);
     if (extent != NULL && !*commit) {
         /* Commit header. */
         if (chunk_commit_wrapper(tsdn, arena, chunk_hooks, extent, 0,
@@ -676,7 +676,7 @@ arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        chunksize, chunksize, zero, true);
+        chunksize, PAGE, zero, true);
     if (extent != NULL)
         *commit = true;
     if (extent == NULL) {
@@ -892,13 +892,12 @@ arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
 
 static extent_t *
 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
-    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
-    size_t csize)
+    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero)
 {
     extent_t *extent;
     bool commit = true;
 
-    extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+    extent = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, usize,
         alignment, zero, &commit, false);
     if (extent == NULL) {
         /* Revert optimistic stats updates. */
@@ -920,7 +919,6 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
 {
     extent_t *extent;
     chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-    size_t csize = CHUNK_CEILING(usize);
 
     malloc_mutex_lock(tsdn, &arena->lock);
 
@@ -932,11 +930,11 @@ arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
     arena_nactive_add(arena, usize >> LG_PAGE);
 
     extent = arena_chunk_cache_alloc_locked(tsdn, arena, &chunk_hooks, NULL,
-        csize, alignment, zero, false);
+        usize, alignment, zero, false);
     malloc_mutex_unlock(tsdn, &arena->lock);
     if (extent == NULL) {
         extent = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
-            usize, alignment, zero, csize);
+            usize, alignment, zero);
     }
 
     return (extent);
@@ -954,32 +952,10 @@ arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, extent_t *extent)
     }
     arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
 
-    if ((extent_size_get(extent) & chunksize_mask) != 0)
-        extent_size_set(extent, CHUNK_CEILING(extent_size_get(extent)));
-
     arena_chunk_cache_dalloc_locked(tsdn, arena, &chunk_hooks, extent);
     malloc_mutex_unlock(tsdn, &arena->lock);
 }
 
-void
-arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
-    size_t oldsize)
-{
-    size_t usize = extent_size_get(extent);
-
-    assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
-    assert(oldsize != usize);
-
-    malloc_mutex_lock(tsdn, &arena->lock);
-    if (config_stats)
-        arena_huge_ralloc_stats_update(arena, oldsize, usize);
-    if (oldsize < usize)
-        arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
-    else
-        arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
-    malloc_mutex_unlock(tsdn, &arena->lock);
-}
-
 void
 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
     size_t oldsize)
@@ -1501,8 +1477,7 @@ arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
             zero = false;
             extent = arena_chunk_cache_alloc_locked(tsdn, arena,
                 chunk_hooks, extent_addr_get(chunkselm),
-                extent_size_get(chunkselm), chunksize, &zero,
-                false);
+                extent_size_get(chunkselm), PAGE, &zero, false);
             assert(extent == chunkselm);
             assert(zero == extent_zeroed_get(chunkselm));
             extent_dirty_insert(chunkselm, purge_runs_sentinel,
@@ -2641,7 +2616,7 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     if (likely(usize <= large_maxclass)) {
         ret = arena_palloc_large(tsdn, arena, usize, alignment,
             zero);
-    } else if (likely(alignment <= chunksize))
+    } else if (likely(alignment <= PAGE))
         ret = huge_malloc(tsdn, arena, usize, zero);
     else
         ret = huge_palloc(tsdn, arena, usize, alignment, zero);
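
With the CHUNK_CEILING() calls gone from arena_chunk_alloc_huge() and arena_chunk_dalloc_huge(), the extent backing a huge allocation is exactly usize bytes, so the active-page accounting added at allocation time matches what is subtracted at deallocation time. A toy sketch of that invariant, assuming LG_PAGE = 12 (a hypothetical helper, not the patched functions themselves):

#include <assert.h>
#include <stddef.h>

#define LG_PAGE 12
#define PAGE    ((size_t)1 << LG_PAGE)

/* Round up to a page multiple, as s2u() does for huge request sizes. */
static size_t
page_ceiling(size_t size)
{
    return ((size + PAGE - 1) & ~(PAGE - 1));
}

/*
 * Toy model of the active-page bookkeeping around a huge allocation: after
 * this patch the extent is exactly usize bytes, so the pages added on
 * allocation equal the pages subtracted on deallocation, with no
 * chunk-rounding slop in between.
 */
int
main(void)
{
    size_t nactive = 0;
    size_t usize = page_ceiling(5 * 1024 * 1024 + 1); /* 5 MiB + 1 byte */

    nactive += usize >> LG_PAGE;    /* arena_chunk_alloc_huge() */
    nactive -= usize >> LG_PAGE;    /* arena_chunk_dalloc_huge() */
    assert(nactive == 0);
    return (0);
}
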
diff --git a/src/base.c b/src/base.c
index 225f522b..1e32d955 100644
--- a/src/base.c
+++ b/src/base.c
@@ -57,7 +57,7 @@ base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
     {
         bool zero = true;
         bool commit = true;
-        addr = chunk_alloc_mmap(NULL, csize, chunksize, &zero, &commit);
+        addr = chunk_alloc_mmap(NULL, csize, PAGE, &zero, &commit);
     }
     if (addr == NULL) {
         if (extent != NULL)
diff --git a/src/chunk.c b/src/chunk.c
index ba9084e3..2b599610 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -58,8 +58,7 @@ static void chunk_record(tsdn_t *tsdn, arena_t *arena,
 static void
 extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
 {
-    size_t psz =
-        extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
+    size_t psz = extent_size_quantize_floor(extent_size_get(extent));
     pszind_t pind = psz2ind(psz);
     extent_heap_insert(&extent_heaps[pind], extent);
 }
@@ -67,8 +66,7 @@ extent_heaps_insert(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
 static void
 extent_heaps_remove(extent_heap_t extent_heaps[NPSIZES], extent_t *extent)
 {
-    size_t psz =
-        extent_size_quantize_floor(CHUNK_CEILING(extent_size_get(extent)));
+    size_t psz = extent_size_quantize_floor(extent_size_get(extent));
     pszind_t pind = psz2ind(psz);
     extent_heap_remove(&extent_heaps[pind], extent);
 }
@@ -169,11 +167,9 @@ extent_rtree_acquire(tsdn_t *tsdn, const extent_t *extent, bool dependent,
     assert(*r_elm_a != NULL);
 
     if (extent_size_get(extent) > PAGE) {
-        uintptr_t last =
-            (CHUNK_CEILING((uintptr_t)extent_past_get(extent)) - PAGE);
-
-        *r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree, last,
-            dependent, init_missing);
+        *r_elm_b = rtree_elm_acquire(tsdn, &chunks_rtree,
+            (uintptr_t)extent_last_get(extent), dependent,
+            init_missing);
         if (!dependent && *r_elm_b == NULL)
             return (true);
         assert(*r_elm_b != NULL);
@@ -290,8 +286,6 @@ chunk_first_best_fit(arena_t *arena, extent_heap_t extent_heaps[NPSIZES],
 {
     pszind_t pind, i;
 
-    assert(size == CHUNK_CEILING(size));
-
     pind = psz2ind(extent_size_quantize_ceil(size));
     for (i = pind; i < NPSIZES; i++) {
         extent_t *extent = extent_heap_first(&extent_heaps[i]);
@@ -326,9 +320,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *extent;
     size_t alloc_size, leadsize, trailsize;
 
-    assert(new_addr == NULL || alignment == chunksize);
-
-    alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+    alloc_size = s2u(size + alignment - PAGE);
     /* Beware size_t wrap-around. */
     if (alloc_size < size)
         return (NULL);
@@ -441,9 +433,7 @@ chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     void *ret;
 
     assert(size != 0);
-    assert((size & chunksize_mask) == 0);
     assert(alignment != 0);
-    assert((alignment & chunksize_mask) == 0);
 
     /* "primary" dss. */
     if (have_dss && dss_prec == dss_prec_primary && (ret =
@@ -472,9 +462,7 @@ chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     bool commit;
 
     assert(size != 0);
-    assert((size & chunksize_mask) == 0);
     assert(alignment != 0);
-    assert((alignment & chunksize_mask) == 0);
 
     commit = true;
     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_cached,
@@ -525,9 +513,7 @@ chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *extent;
 
     assert(size != 0);
-    assert((size & chunksize_mask) == 0);
     assert(alignment != 0);
-    assert((alignment & chunksize_mask) == 0);
 
     extent = chunk_recycle(tsdn, arena, chunk_hooks, arena->chunks_retained,
         false, new_addr, size, alignment, zero, commit, slab);
@@ -551,8 +537,10 @@ chunk_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
         return (NULL);
     addr = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
         arena->ind);
-    if (addr == NULL)
+    if (addr == NULL) {
+        extent_dalloc(tsdn, arena, extent);
         return (NULL);
+    }
     extent_init(extent, arena, addr, size, true, false, zero, commit, slab);
     if (chunk_register(tsdn, extent)) {
         chunk_leak(tsdn, arena, chunk_hooks, false, extent);
@@ -585,9 +573,6 @@ static bool
 chunk_can_coalesce(const extent_t *a, const extent_t *b)
 {
-    assert((void *)CHUNK_CEILING((uintptr_t)extent_past_get(a)) ==
-        extent_addr_get(b));
-
     if (extent_arena_get(a) != extent_arena_get(b))
         return (false);
     if (extent_active_get(a) != extent_active_get(b))
@@ -637,7 +622,6 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     malloc_mutex_lock(tsdn, &arena->chunks_mtx);
     chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
 
-    assert((extent_size_get(extent) & chunksize_mask) == 0);
     extent_active_set(extent, false);
     extent_zeroed_set(extent, !cache && extent_zeroed_get(extent));
     if (extent_slab_get(extent)) {
@@ -651,7 +635,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     /* Try to coalesce forward. */
     next = rtree_read(tsdn, &chunks_rtree,
-        CHUNK_CEILING((uintptr_t)extent_past_get(extent)), false);
+        (uintptr_t)extent_past_get(extent), false);
     if (next != NULL) {
         chunk_try_coalesce(tsdn, arena, chunk_hooks, extent, next,
             extent_heaps, cache);
@@ -659,7 +643,7 @@ chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     /* Try to coalesce backward. */
     prev = rtree_read(tsdn, &chunks_rtree,
-        (uintptr_t)extent_addr_get(extent) - PAGE, false);
+        (uintptr_t)extent_before_get(extent), false);
     if (prev != NULL) {
         chunk_try_coalesce(tsdn, arena, chunk_hooks, prev, extent,
             extent_heaps, cache);
@@ -675,7 +659,6 @@ chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     assert(extent_addr_get(extent) != NULL);
     assert(extent_size_get(extent) != 0);
-    assert((extent_size_get(extent) & chunksize_mask) == 0);
 
     extent_zeroed_set(extent, false);
 
@@ -700,7 +683,6 @@ chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     assert(extent_addr_get(extent) != NULL);
     assert(extent_size_get(extent) != 0);
-    assert((extent_size_get(extent) & chunksize_mask) == 0);
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     /* Try to deallocate. */
@@ -788,8 +770,7 @@ chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     return (chunk_hooks->purge(extent_addr_get(extent),
-        CHUNK_CEILING(extent_size_get(extent)), offset, length,
-        arena->ind));
+        extent_size_get(extent), offset, length, arena->ind));
 }
 
 static bool
@@ -809,9 +790,7 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     extent_t *trail;
     rtree_elm_t *lead_elm_a, *lead_elm_b, *trail_elm_a, *trail_elm_b;
 
-    assert(CHUNK_CEILING(size_a) == size_a);
-    assert(CHUNK_CEILING(extent_size_get(extent)) == size_a +
-        CHUNK_CEILING(size_b));
+    assert(extent_size_get(extent) == size_a + size_b);
 
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
 
@@ -833,16 +812,15 @@ chunk_split_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
     }
 
     extent_init(trail, arena, (void *)((uintptr_t)extent_addr_get(extent) +
-        size_a), CHUNK_CEILING(size_b), extent_active_get(extent),
+        size_a), size_b, extent_active_get(extent),
         extent_dirty_get(extent), extent_zeroed_get(extent),
         extent_committed_get(extent), extent_slab_get(extent));
     if (extent_rtree_acquire(tsdn, trail, false, true, &trail_elm_a,
         &trail_elm_b))
         goto label_error_c;
 
-    if (chunk_hooks->split(extent_addr_get(extent), size_a +
-        CHUNK_CEILING(size_b), size_a, CHUNK_CEILING(size_b),
-        extent_committed_get(extent), arena->ind))
+    if (chunk_hooks->split(extent_addr_get(extent), size_a + size_b, size_a,
+        size_b, extent_committed_get(extent), arena->ind))
         goto label_error_d;
 
     extent_size_set(extent, size_a);
@@ -886,9 +864,6 @@ chunk_merge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 {
     rtree_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
 
-    assert((extent_size_get(a) & chunksize_mask) == 0);
-    assert((extent_size_get(b) & chunksize_mask) == 0);
-
     chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
     if (chunk_hooks->merge(extent_addr_get(a), extent_size_get(a),
         extent_addr_get(b), extent_size_get(b), extent_committed_get(a),
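
chunk_split_wrapper() now only asserts that the two pieces cover the extent exactly, so callers such as the in-place huge shrink can split off a trailing run of whole pages without chunk rounding. A toy model of that split under an assumed 4 KiB page (stand-in struct, none of the real extent/rtree bookkeeping):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE ((size_t)4096) /* assumed page size */

/* Minimal stand-in for an extent: base address and size in bytes. */
typedef struct {
    uintptr_t   addr;
    size_t      size;
} toy_extent_t;

/*
 * Toy version of the split performed for an in-place huge shrink: the lead
 * keeps exactly usize bytes and the trail gets the remaining pages, with no
 * chunk rounding of either piece.
 */
static toy_extent_t
toy_split(toy_extent_t *lead, size_t usize)
{
    size_t oldsize = lead->size;
    toy_extent_t trail;

    assert(usize % PAGE == 0 && usize < oldsize);
    trail.addr = lead->addr + usize;
    trail.size = oldsize - usize;
    lead->size = usize;
    /* Mirrors the reworked assertion: the two pieces cover the extent. */
    assert(lead->size + trail.size == oldsize);
    return (trail);
}

int
main(void)
{
    toy_extent_t e = {0x200000, 7 * PAGE};
    toy_extent_t t = toy_split(&e, 5 * PAGE);

    assert(e.size == 5 * PAGE && t.size == 2 * PAGE);
    assert(t.addr == e.addr + e.size); /* trail starts at past-the-end */
    return (0);
}
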
diff --git a/src/chunk_dss.c b/src/chunk_dss.c
index d0fae7bc..0119c12b 100644
--- a/src/chunk_dss.c
+++ b/src/chunk_dss.c
@@ -69,9 +69,12 @@ void *
 chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     size_t alignment, bool *zero, bool *commit)
 {
+    void *ret;
+    extent_t *pad;
+
     cassert(have_dss);
-    assert(size > 0 && (size & chunksize_mask) == 0);
-    assert(alignment > 0 && (alignment & chunksize_mask) == 0);
+    assert(size > 0);
+    assert(alignment > 0);
 
     /*
      * sbrk() uses a signed increment argument, so take care not to
@@ -80,19 +83,22 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
     if ((intptr_t)size < 0)
         return (NULL);
 
+    pad = extent_alloc(tsdn, arena);
+    if (pad == NULL)
+        return (NULL);
+
     malloc_mutex_lock(tsdn, &dss_mtx);
     if (dss_prev != (void *)-1) {
-
         /*
          * The loop is necessary to recover from races with other
          * threads that are using the DSS for something other than
          * malloc.
          */
-        do {
-            void *ret, *cpad_addr, *dss_next;
-            extent_t *cpad;
-            size_t gap_size, cpad_size;
+        while (true) {
+            void *pad_addr, *dss_next;
+            size_t pad_size;
             intptr_t incr;
 
+            /* Avoid an unnecessary system call. */
             if (new_addr != NULL && dss_max != new_addr)
                 break;
@@ -105,58 +111,48 @@ chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
                 break;
 
             /*
-             * Calculate how much padding is necessary to
-             * chunk-align the end of the DSS.
+             * Compute how much pad space (if any) is necessary to
+             * satisfy alignment.  This space can be recycled for
+             * later use.
              */
-            gap_size = (chunksize - ALIGNMENT_ADDR2OFFSET(dss_max,
-                chunksize)) & chunksize_mask;
-            /*
-             * Compute how much chunk-aligned pad space (if any) is
-             * necessary to satisfy alignment.  This space can be
-             * recycled for later use.
-             */
-            cpad_addr = (void *)((uintptr_t)dss_max + gap_size);
+            pad_addr = (void *)((uintptr_t)dss_max);
             ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                 alignment);
-            cpad_size = (uintptr_t)ret - (uintptr_t)cpad_addr;
-            if (cpad_size != 0) {
-                cpad = extent_alloc(tsdn, arena);
-                if (cpad == NULL) {
-                    malloc_mutex_unlock(tsdn, &dss_mtx);
-                    return (NULL);
-                }
-                extent_init(cpad, arena, cpad_addr, cpad_size,
+            pad_size = (uintptr_t)ret - (uintptr_t)pad_addr;
+            if (pad_size != 0) {
+                extent_init(pad, arena, pad_addr, pad_size,
                     false, true, false, true, false);
             }
             dss_next = (void *)((uintptr_t)ret + size);
             if ((uintptr_t)ret < (uintptr_t)dss_max ||
-                (uintptr_t)dss_next < (uintptr_t)dss_max) {
-                /* Wrap-around. */
-                malloc_mutex_unlock(tsdn, &dss_mtx);
-                return (NULL);
-            }
-            incr = gap_size + cpad_size + size;
+                (uintptr_t)dss_next < (uintptr_t)dss_max)
+                break; /* Wrap-around. */
+            incr = pad_size + size;
             dss_prev = chunk_dss_sbrk(incr);
+            if (dss_prev == (void *)-1)
+                break;
             if (dss_prev == dss_max) {
                 /* Success. */
                 dss_max = dss_next;
                 malloc_mutex_unlock(tsdn, &dss_mtx);
-                if (cpad_size != 0) {
+                if (pad_size != 0) {
                     chunk_hooks_t chunk_hooks =
                         CHUNK_HOOKS_INITIALIZER;
                     chunk_dalloc_wrapper(tsdn, arena,
-                        &chunk_hooks, cpad);
-                }
+                        &chunk_hooks, pad);
+                } else
+                    extent_dalloc(tsdn, arena, pad);
                 if (*zero)
                     memset(ret, 0, size);
                 if (!*commit)
                     *commit = pages_decommit(ret, size);
                 return (ret);
             }
-        } while (dss_prev != (void *)-1);
+        }
     }
+    /* OOM. */
     malloc_mutex_unlock(tsdn, &dss_mtx);
-
+    extent_dalloc(tsdn, arena, pad);
     return (NULL);
 }
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 13708027..e1ee26f4 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -50,7 +50,6 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
      */
 
     assert(alignment != 0);
-    assert((alignment & chunksize_mask) == 0);
 
     ret = pages_map(new_addr, size, commit);
     if (ret == NULL || ret == new_addr)
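
The rewritten chunk_alloc_dss() allocates one pad extent up front and, inside the retry loop, derives the pad as the distance from the current break (dss_max) to the next aligned address; the old gap_size/cpad chunk rounding disappears. A small sketch of that arithmetic, with a hand-rolled stand-in for ALIGNMENT_CEILING() and hypothetical break/alignment values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round addr up to a power-of-two alignment (what ALIGNMENT_CEILING does). */
static uintptr_t
alignment_ceiling(uintptr_t addr, uintptr_t alignment)
{
    return ((addr + (alignment - 1)) & ~(alignment - 1));
}

int
main(void)
{
    uintptr_t dss_max = 0x804a123;  /* hypothetical current break */
    uintptr_t alignment = 0x10000;  /* 64 KiB request alignment */
    uintptr_t size = 0x30000;

    /* pad_addr/pad_size as computed in the new loop body. */
    uintptr_t pad_addr = dss_max;
    uintptr_t ret = alignment_ceiling(dss_max, alignment);
    uintptr_t pad_size = ret - pad_addr;
    intptr_t incr = (intptr_t)(pad_size + size);    /* sbrk() increment */

    assert(ret % alignment == 0);
    assert(pad_addr + pad_size == ret); /* pad ends where ret begins */
    printf("pad=%#lx incr=%#lx\n", (unsigned long)pad_size,
        (unsigned long)incr);
    return (0);
}

When pad_size turns out to be zero, the pre-allocated extent is handed back through extent_dalloc(), which is why a single up-front extent_alloc() can replace the old in-loop cpad allocation.
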
diff --git a/src/huge.c b/src/huge.c
index 31d3bcae..69cf034a 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -9,7 +9,7 @@ huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
 
     assert(usize == s2u(usize));
 
-    return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+    return (huge_palloc(tsdn, arena, usize, PAGE, zero));
 }
 
 void *
@@ -20,14 +20,11 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
     extent_t *extent;
     bool is_zeroed;
 
-    /* Allocate one or more contiguous chunks for this request. */
-
     assert(!tsdn_null(tsdn) || arena != NULL);
 
     ausize = sa2u(usize, alignment);
     if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
         return (NULL);
-    assert(ausize >= chunksize);
 
     /*
      * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
@@ -40,9 +37,6 @@ huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
         arena, usize, alignment, &is_zeroed)) == NULL)
         return (NULL);
 
-    if (usize < extent_size_get(extent))
-        extent_size_set(extent, usize);
-
     /* Insert extent into huge. */
     malloc_mutex_lock(tsdn, &arena->huge_mtx);
     ql_elm_new(extent, ql_link);
@@ -86,80 +80,20 @@ huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
 huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
 #endif
 
-static void
-huge_ralloc_no_move_similar(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
-    size_t usize_max, bool zero)
-{
-    size_t usize, usize_next;
-    arena_t *arena = extent_arena_get(extent);
-    size_t oldsize = extent_size_get(extent);
-    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
-    bool pre_zeroed, post_zeroed;
-
-    /* Increase usize to incorporate extra. */
-    for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
-        <= oldsize; usize = usize_next)
-        ; /* Do nothing. */
-
-    if (oldsize == usize)
-        return;
-
-    pre_zeroed = extent_zeroed_get(extent);
-
-    /* Fill if necessary (shrinking). */
-    if (oldsize > usize) {
-        size_t sdiff = oldsize - usize;
-        if (config_fill && unlikely(opt_junk_free)) {
-            memset((void *)((uintptr_t)extent_addr_get(extent) +
-                usize), JEMALLOC_FREE_JUNK, sdiff);
-            post_zeroed = false;
-        } else {
-            post_zeroed = !chunk_purge_wrapper(tsdn, arena,
-                &chunk_hooks, extent, usize, sdiff);
-        }
-    } else
-        post_zeroed = pre_zeroed;
-
-    /* Update the size of the huge allocation. */
-    assert(extent_size_get(extent) != usize);
-    malloc_mutex_lock(tsdn, &arena->huge_mtx);
-    extent_size_set(extent, usize);
-    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
-    /* Update zeroed. */
-    extent_zeroed_set(extent, post_zeroed);
-
-    arena_chunk_ralloc_huge_similar(tsdn, arena, extent, oldsize);
-
-    /* Fill if necessary (growing). */
-    if (oldsize < usize) {
-        if (zero || (config_fill && unlikely(opt_zero))) {
-            if (!pre_zeroed) {
-                memset((void *)
-                    ((uintptr_t)extent_addr_get(extent) +
-                    oldsize), 0, usize - oldsize);
-            }
-        } else if (config_fill && unlikely(opt_junk_alloc)) {
-            memset((void *)((uintptr_t)extent_addr_get(extent) +
-                oldsize), JEMALLOC_ALLOC_JUNK, usize - oldsize);
-        }
-    }
-}
-
 static bool
 huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
 {
     arena_t *arena = extent_arena_get(extent);
     size_t oldsize = extent_size_get(extent);
     chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-    size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
-    size_t sdiff = CHUNK_CEILING(usize) - usize;
+    size_t diff = oldsize - usize;
 
     assert(oldsize > usize);
 
-    /* Split excess chunks. */
-    if (cdiff != 0) {
+    /* Split excess pages. */
+    if (diff != 0) {
         extent_t *trail = chunk_split_wrapper(tsdn, arena, &chunk_hooks,
-            extent, CHUNK_CEILING(usize), cdiff);
+            extent, usize, diff);
         if (trail == NULL)
             return (true);
 
@@ -171,28 +105,6 @@ huge_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize)
         arena_chunk_cache_dalloc(tsdn, arena, &chunk_hooks, trail);
     }
 
-    /* Optionally fill trailing subchunk. */
-    if (sdiff != 0) {
-        bool post_zeroed;
-
-        if (config_fill && unlikely(opt_junk_free)) {
-            huge_dalloc_junk(tsdn,
-                (void *)((uintptr_t)extent_addr_get(extent) +
-                usize), sdiff);
-            post_zeroed = false;
-        } else {
-            post_zeroed = !chunk_purge_wrapper(tsdn, arena,
-                &chunk_hooks, extent, usize, sdiff);
-
-            if (config_fill && unlikely(opt_zero) && !post_zeroed) {
-                memset((void *)
-                    ((uintptr_t)extent_addr_get(extent) +
-                    usize), 0, sdiff);
-            }
-        }
-        extent_zeroed_set(extent, post_zeroed);
-    }
-
     arena_chunk_ralloc_huge_shrink(tsdn, arena, extent, oldsize);
 
     return (false);
@@ -204,20 +116,18 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
 {
     arena_t *arena = extent_arena_get(extent);
     size_t oldsize = extent_size_get(extent);
-    bool is_zeroed_subchunk = extent_zeroed_get(extent);
-    bool is_zeroed_chunk = false;
+    bool is_zeroed_trail = false;
     chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
-    size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
-    void *nchunk =
-        (void *)CHUNK_CEILING((uintptr_t)extent_past_get(extent));
+    size_t trailsize = usize - oldsize;
     extent_t *trail;
 
-    if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks, nchunk,
-        cdiff, chunksize, &is_zeroed_chunk)) == NULL) {
+    if ((trail = arena_chunk_cache_alloc(tsdn, arena, &chunk_hooks,
+        extent_past_get(extent), trailsize, PAGE, &is_zeroed_trail)) ==
+        NULL) {
         bool commit = true;
         if ((trail = chunk_alloc_wrapper(tsdn, arena, &chunk_hooks,
-            nchunk, cdiff, chunksize, &is_zeroed_chunk, &commit, false))
-            == NULL)
+            extent_past_get(extent), trailsize, PAGE, &is_zeroed_trail,
+            &commit, false)) == NULL)
             return (true);
     }
 
@@ -227,23 +137,15 @@ huge_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
     }
 
     if (zero || (config_fill && unlikely(opt_zero))) {
-        if (!is_zeroed_subchunk) {
+        if (!is_zeroed_trail) {
             memset((void *)((uintptr_t)extent_addr_get(extent) +
-                oldsize), 0, CHUNK_CEILING(oldsize) - oldsize);
-        }
-        if (!is_zeroed_chunk) {
-            memset((void *)((uintptr_t)extent_addr_get(extent) +
-                CHUNK_CEILING(oldsize)), 0, usize -
-                CHUNK_CEILING(oldsize));
+                oldsize), 0, usize - oldsize);
         }
     } else if (config_fill && unlikely(opt_junk_alloc)) {
         memset((void *)((uintptr_t)extent_addr_get(extent) + oldsize),
             JEMALLOC_ALLOC_JUNK, usize - oldsize);
     }
 
-    if (usize < extent_size_get(extent))
-        extent_size_set(extent, usize);
-
     arena_chunk_ralloc_huge_expand(tsdn, arena, extent, oldsize);
 
     return (false);
@@ -260,7 +162,7 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
     /* Both allocation sizes must be huge to avoid a move. */
     assert(extent_size_get(extent) >= chunksize && usize_max >= chunksize);
 
-    if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(extent_size_get(extent))) {
+    if (usize_max > extent_size_get(extent)) {
         /* Attempt to expand the allocation in-place. */
         if (!huge_ralloc_no_move_expand(tsdn, extent, usize_max,
             zero)) {
@@ -268,9 +170,9 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
             return (false);
         }
         /* Try again, this time with usize_min. */
-        if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
-            CHUNK_CEILING(extent_size_get(extent)) &&
-            huge_ralloc_no_move_expand(tsdn, extent, usize_min, zero)) {
+        if (usize_min < usize_max && usize_min > extent_size_get(extent)
+            && huge_ralloc_no_move_expand(tsdn, extent, usize_min,
+            zero)) {
             arena_decay_tick(tsdn, extent_arena_get(extent));
             return (false);
         }
@@ -280,17 +182,14 @@ huge_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
      * Avoid moving the allocation if the existing chunk size accommodates
      * the new size.
      */
-    if (CHUNK_CEILING(extent_size_get(extent)) >= CHUNK_CEILING(usize_min)
-        && CHUNK_CEILING(extent_size_get(extent)) <=
-        CHUNK_CEILING(usize_max)) {
-        huge_ralloc_no_move_similar(tsdn, extent, usize_min, usize_max,
-            zero);
+    if (extent_size_get(extent) >= usize_min && extent_size_get(extent) <=
+        usize_max) {
         arena_decay_tick(tsdn, extent_arena_get(extent));
         return (false);
     }
 
     /* Attempt to shrink the allocation in-place. */
-    if (CHUNK_CEILING(extent_size_get(extent)) > CHUNK_CEILING(usize_max)) {
+    if (extent_size_get(extent) > usize_max) {
         if (!huge_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
             arena_decay_tick(tsdn, extent_arena_get(extent));
             return (false);
@@ -304,7 +203,7 @@ huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
     size_t alignment, bool zero)
 {
 
-    if (alignment <= chunksize)
+    if (alignment <= PAGE)
         return (huge_malloc(tsdn, arena, usize, zero));
     return (huge_palloc(tsdn, arena, usize, alignment, zero));
 }
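
Because extent sizes now track usize exactly, the in-place reallocation policy in huge_ralloc_no_move() reduces to plain comparisons: grow if the maximum request exceeds the current size, keep the extent when it already falls inside the requested range (the _similar path is gone), and shrink when it exceeds the maximum. A compact model of that decision order, with an enum standing in for the real helper calls (it omits the retry with usize_min after a failed expansion):

#include <stddef.h>
#include <stdio.h>

typedef enum { ACTION_EXPAND, ACTION_KEEP, ACTION_SHRINK, ACTION_MOVE } action_t;

/*
 * Toy model of the simplified in-place policy: compare the extent's exact
 * size against the requested range instead of chunk-rounded sizes.
 */
static action_t
model_no_move(size_t cursize, size_t usize_min, size_t usize_max)
{
    if (usize_max > cursize)
        return (ACTION_EXPAND); /* try huge_ralloc_no_move_expand() */
    if (cursize >= usize_min && cursize <= usize_max)
        return (ACTION_KEEP);   /* already within the requested range */
    if (cursize > usize_max)
        return (ACTION_SHRINK); /* try huge_ralloc_no_move_shrink() */
    return (ACTION_MOVE);       /* fall back to a copying reallocation */
}

int
main(void)
{
    printf("%d\n", model_no_move(6 << 20, 8 << 20, 8 << 20)); /* expand */
    printf("%d\n", model_no_move(8 << 20, 7 << 20, 9 << 20)); /* keep */
    printf("%d\n", model_no_move(9 << 20, 6 << 20, 7 << 20)); /* shrink */
    return (0);
}
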
diff --git a/test/integration/chunk.c b/test/integration/chunk.c
index ff9bf967..092472c6 100644
--- a/test/integration/chunk.c
+++ b/test/integration/chunk.c
@@ -224,22 +224,6 @@ TEST_BEGIN(test_chunk)
     do_dalloc = true;
     do_decommit = false;
 
-    /* Test purge for partial-chunk huge allocations. */
-    if (huge0 * 2 > huge2) {
-        /*
-         * There are at least four size classes per doubling, so a
-         * successful xallocx() from size=huge2 to size=huge1 is
-         * guaranteed to leave trailing purgeable memory.
-         */
-        p = mallocx(huge2, flags);
-        assert_ptr_not_null(p, "Unexpected mallocx() error");
-        did_purge = false;
-        assert_zu_eq(xallocx(p, huge1, 0, flags), huge1,
-            "Unexpected xallocx() failure");
-        assert_true(did_purge, "Expected purge");
-        dallocx(p, flags);
-    }
-
     /* Test decommit for large allocations. */
     do_decommit = true;
     p = mallocx(large1, flags);