If MALLOCX_ARENA(a) is specified, use it during tcache fill.

Jason Evans 2015-02-13 15:28:56 -08:00
parent feaaa3df0d
commit 41cfe03f39
3 changed files with 37 additions and 36 deletions
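
For context: mallocx(size, MALLOCX_ARENA(a)) routes an allocation to an explicit arena, but until now a small request that missed in the thread cache refilled its cache bin via arena_choose(tsd, NULL), i.e. from the thread's automatically chosen arena, ignoring the requested one. The diffs below thread the resolved arena through tcache_alloc_small()/tcache_alloc_large() down to the fill in tcache_alloc_small_hard(). A minimal sketch of the affected usage, not part of the commit (it assumes a jemalloc of this vintage, where the "arenas.extend" mallctl creates a new arena; the size 64 is just an arbitrary small size class):

	#include <jemalloc/jemalloc.h>

	int
	main(void)
	{
		unsigned arena_ind;
		size_t sz = sizeof(arena_ind);
		void *p;

		/* Create a fresh arena; "arenas.extend" returns its index. */
		if (mallctl("arenas.extend", &arena_ind, &sz, NULL, 0) != 0)
			return (1);

		/*
		 * Small allocation bound to the new arena.  It is served via
		 * the thread cache; on a bin miss, the fill should now come
		 * from arena_ind rather than arena_choose(tsd, NULL).
		 */
		p = mallocx(64, MALLOCX_ARENA(arena_ind));
		if (p == NULL)
			return (1);
		dallocx(p, 0);

		return (0);
	}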

include/jemalloc/internal/arena.h

@@ -985,28 +985,26 @@ arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
 	assert(size != 0);
 	assert(size <= arena_maxclass);
 
-	if (likely(size <= SMALL_MAXCLASS)) {
-		if (likely(tcache != NULL))
-			return (tcache_alloc_small(tsd, tcache, size, zero));
-		else {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
+	arena = arena_choose(tsd, arena);
+	if (unlikely(arena == NULL))
+		return (NULL);
+
+	if (likely(size <= SMALL_MAXCLASS)) {
+		if (likely(tcache != NULL)) {
+			return (tcache_alloc_small(tsd, arena, tcache, size,
+			    zero));
+		} else
 			return (arena_malloc_small(arena, size, zero));
-		}
 	} else if (likely(size <= arena_maxclass)) {
 		/*
 		 * Initialize tcache after checking size in order to avoid
 		 * infinite recursion during tcache initialization.
 		 */
-		if (likely(tcache != NULL) && size <= tcache_maxclass)
-			return (tcache_alloc_large(tsd, tcache, size, zero));
-		else {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
+		if (likely(tcache != NULL) && size <= tcache_maxclass) {
+			return (tcache_alloc_large(tsd, arena, tcache, size,
+			    zero));
+		} else
 			return (arena_malloc_large(arena, size, zero));
-		}
 	} else
 		return (huge_malloc(tsd, arena, size, zero, tcache));
 }

include/jemalloc/internal/tcache.h

@@ -120,10 +120,10 @@ extern tcaches_t *tcaches;
 
 size_t	tcache_salloc(const void *ptr);
 void	tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void	*tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache,
-    tcache_bin_t *tbin, index_t binind);
-void	tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
-    unsigned rem, tcache_t *tcache);
+void	*tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, index_t binind);
+void	tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    index_t binind, unsigned rem);
 void	tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
     unsigned rem, tcache_t *tcache);
 void	tcache_arena_associate(tcache_t *tcache, arena_t *arena);
@@ -151,10 +151,10 @@ bool	tcache_enabled_get(void);
 tcache_t *tcache_get(tsd_t *tsd, bool create);
 void	tcache_enabled_set(bool enabled);
 void	*tcache_alloc_easy(tcache_bin_t *tbin);
-void	*tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size,
-    bool zero);
-void	*tcache_alloc_large(tsd_t *tsd, tcache_t *tcache, size_t size,
-    bool zero);
+void	*tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, bool zero);
+void	*tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    size_t size, bool zero);
 void	tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
     index_t binind);
 void	tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
@@ -258,7 +258,8 @@ tcache_alloc_easy(tcache_bin_t *tbin)
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    bool zero)
 {
 	void *ret;
 	index_t binind;
@@ -271,7 +272,7 @@ tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
 	usize = index2size(binind);
 	ret = tcache_alloc_easy(tbin);
 	if (unlikely(ret == NULL)) {
-		ret = tcache_alloc_small_hard(tsd, tcache, tbin, binind);
+		ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind);
 		if (ret == NULL)
 			return (NULL);
 	}
@@ -302,7 +303,8 @@ tcache_alloc_small(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
 }
 
 JEMALLOC_ALWAYS_INLINE void *
-tcache_alloc_large(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
+tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
+    bool zero)
 {
 	void *ret;
 	index_t binind;
@@ -320,7 +322,7 @@ tcache_alloc_large(tsd_t *tsd, tcache_t *tcache, size_t size, bool zero)
 		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
 		 */
-		ret = arena_malloc_large(arena_choose(tsd, NULL), usize, zero);
+		ret = arena_malloc_large(arena, usize, zero);
 		if (ret == NULL)
 			return (NULL);
 	} else {
@@ -366,8 +368,8 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
 	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
-		tcache_bin_flush_small(tsd, tbin, binind,
-		    (tbin_info->ncached_max >> 1), tcache);
+		tcache_bin_flush_small(tsd, tcache, tbin, binind,
+		    (tbin_info->ncached_max >> 1));
 	}
 	assert(tbin->ncached < tbin_info->ncached_max);
 	tbin->avail[tbin->ncached] = ptr;

src/tcache.c

@@ -41,8 +41,9 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 		 * Flush (ceiling) 3/4 of the objects below the low water mark.
 		 */
 		if (binind < NBINS) {
-			tcache_bin_flush_small(tsd, tbin, binind, tbin->ncached
-			    - tbin->low_water + (tbin->low_water >> 2), tcache);
+			tcache_bin_flush_small(tsd, tcache, tbin, binind,
+			    tbin->ncached - tbin->low_water + (tbin->low_water
+			    >> 2));
 		} else {
 			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
 			    - tbin->low_water + (tbin->low_water >> 2), tcache);
@@ -70,13 +71,13 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
 }
 
 void *
-tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
-    index_t binind)
+tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+    tcache_bin_t *tbin, index_t binind)
 {
 	void *ret;
 
-	arena_tcache_fill_small(arena_choose(tsd, NULL), tbin, binind,
-	    config_prof ? tcache->prof_accumbytes : 0);
+	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
+	    tcache->prof_accumbytes : 0);
 	if (config_prof)
 		tcache->prof_accumbytes = 0;
 	ret = tcache_alloc_easy(tbin);
@@ -85,8 +86,8 @@ tcache_alloc_small_hard(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 }
 
 void
-tcache_bin_flush_small(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
-    unsigned rem, tcache_t *tcache)
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+    index_t binind, unsigned rem)
 {
 	arena_t *arena;
 	void *ptr;
@@ -350,7 +351,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
 	for (i = 0; i < NBINS; i++) {
 		tcache_bin_t *tbin = &tcache->tbins[i];
-		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
+		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
 		if (config_stats && tbin->tstats.nrequests != 0) {
 			arena_bin_t *bin = &arena->bins[i];