From 41cfe03f39740fe61cf46d86982f66c24168de32 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Fri, 13 Feb 2015 15:28:56 -0800
Subject: [PATCH] If MALLOCX_ARENA(a) is specified, use it during tcache fill.

---
 include/jemalloc/internal/arena.h  | 26 ++++++++++++--------------
 include/jemalloc/internal/tcache.h | 28 +++++++++++++++-------------
 src/tcache.c                       | 19 ++++++++++---------
 3 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 4d88736d..b195daf0 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -985,28 +985,26 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 	assert(size != 0);
 	assert(size <= arena_maxclass);
 
+	arena = arena_choose(tsd, arena);
+	if (unlikely(arena == NULL))
+		return (NULL);
+
 	if (likely(size <= SMALL_MAXCLASS)) {
-		if (likely(tcache != NULL))
-			return (tcache_alloc_small(tsd, tcache, size, zero));
-		else {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
+		if (likely(tcache != NULL)) {
+			return (tcache_alloc_small(tsd, arena, tcache, size,
+			    zero));
+		} else
 			return (arena_malloc_small(arena, size, zero));
-		}
 	} else if (likely(size <= arena_maxclass)) {
 		/*
 		 * Initialize tcache after checking size in order to avoid
 		 * infinite recursion during tcache initialization.
 		 */
-		if (likely(tcache != NULL) && size <= tcache_maxclass)
-			return (tcache_alloc_large(tsd, tcache, size, zero));
-		else {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
+		if (likely(tcache != NULL) && size <= tcache_maxclass) {
+			return (tcache_alloc_large(tsd, arena, tcache, size,
+			    zero));
+		} else
 			return (arena_malloc_large(arena, size, zero));
-		}
 	} else
 		return (huge_malloc(tsd, arena, size, zero, tcache));
 }
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 2a3952be..d2443b12 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -120,10 +120,10 @@ extern tcaches_t *tcaches;
 
 size_t	tcache_salloc(const void *ptr);
 void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void	*tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache,
+void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
     tcache_bin_t *tbin, index_t binind);
-void	tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
-    unsigned rem, tcache_t *tcache);
+void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    index_t binind, unsigned rem);
 void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
     unsigned rem, tcache_t *tcache);
 void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
@@ -151,10 +151,10 @@ bool	tcache_enabled_get(void);
 tcache_t *tcache_get(tsd_t *tsd, bool create);
 void	tcache_enabled_set(bool enabled);
 void	*tcache_alloc_easy(tcache_bin_t *tbin);
-void	*tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size,
-    bool zero);
-void	*tcache_alloc_large(tsd_t *tsd, tcache_t *tcache, size_t size,
-    bool zero);
+void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, bool zero);
+void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, bool zero);
 void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
     index_t binind);
 void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
@@ -258,7 +258,8 @@ tcache_alloc_easy(tcache_bin_t *tbin)
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    bool zero)
 {
 	void *ret;
 	index_t binind;
@@ -271,7 +272,7 @@ tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
 	usize = index2size(binind);
 	ret = tcache_alloc_easy(tbin);
 	if (unlikely(ret == NULL)) {
-		ret = tcache_alloc_small_hard(tsd, tcache, tbin, binind);
+		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
 		if (ret == NULL)
 			return (NULL);
 	}
@@ -302,7 +303,8 @@ tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    bool zero)
 {
 	void *ret;
 	index_t binind;
@@ -320,7 +322,7 @@ tcache_alloc_large(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
 		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
 		 */
-		ret = arena_malloc_large(arena_choose(tsd, NULL), usize, zero);
+		ret = arena_malloc_large(arena, usize, zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
@@ -366,8 +368,8 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_small(tsd, tbin, binind,
-		    (tbin_info->ncached_max >> 1), tcache);
+		tcache_bin_flush_small(tsd, tcache, tbin, binind,
+		    (tbin_info->ncached_max >> 1));
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;
diff --git a/src/tcache.c b/src/tcache.c
index 10c85dd3..318e0dc8 100644
--- a/src/tcache.c
+++ b/src/tcache.c
@@ -41,8 +41,9 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 		 * Flush (ceiling) 3/4 of the objects below the low water mark.
 		 */
 		if (binind < NBINS) {
-			tcache_bin_flush_small(tsd, tbin, binind, tbin->ncached
-			    - tbin->low_water + (tbin->low_water >> 2), tcache);
+			tcache_bin_flush_small(tsd, tcache, tbin, binind,
+			    tbin->ncached - tbin->low_water + (tbin->low_water
+			    >> 2));
 		} else {
 			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
 			    - tbin->low_water + (tbin->low_water >> 2), tcache);
@@ -70,13 +71,13 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 }
 
 void *
-tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    index_t binind)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, index_t binind)
 {
 	void *ret;
 
-	arena_tcache_fill_small(arena_choose(tsd, NULL), tbin, binind,
-	    config_prof ? tcache->prof_accumbytes : 0);
+	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+	    tcache->prof_accumbytes : 0);
 	if (config_prof)
 		tcache->prof_accumbytes = 0;
 	ret = tcache_alloc_easy(tbin);
@@ -85,8 +86,8 @@ tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 }
 
 void
-tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
-    unsigned rem, tcache_t *tcache)
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    index_t binind, unsigned rem)
 {
 	arena_t *arena;
 	void *ptr;
@@ -350,7 +351,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 
 	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tsd, tbin, i, 0, tcache);
+		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
 
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_bin_t *bin = &arena->bins[i];
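
Usage note (not part of the patch): a minimal caller-side sketch of the path
this commit changes. With MALLOCX_ARENA(a), a small allocation that misses in
the thread cache now fills the tcache bin from arena a, via
tcache_alloc_small_hard() -> arena_tcache_fill_small(arena, ...), instead of
from arena_choose(tsd, NULL). The "arenas.extend" mallctl matches this era of
jemalloc; treat the snippet as an assumption-laden sketch, not part of the
commit.

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	void *p;

	/* Create an explicit arena ("arenas.extend" in this jemalloc era). */
	if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0) {
		fprintf(stderr, "arenas.extend failed\n");
		return (1);
	}

	/*
	 * Small request: on a tcache miss, the fill now comes from
	 * arena_ind (the MALLOCX_ARENA argument) rather than from the
	 * thread's automatically chosen arena.
	 */
	p = mallocx(64, MALLOCX_ARENA(arena_ind));
	if (p == NULL)
		return (1);
	dallocx(p, 0);
	return (0);
}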