Use regular arena allocation for huge tree nodes.
This avoids grabbing the base mutex, as a step towards fine-grained locking for huge allocations. The thread cache also provides a tiny (~3%) improvement for serial huge allocations.
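For context, here is a minimal sketch, not jemalloc's actual code, of the trade-off the message describes: the bookkeeping node for a huge allocation used to come from a global "base" region guarded by a single mutex, while routing it through the regular per-thread arena/tcache path avoids that shared lock. The names below (the extent_node_t fields, node_alloc_base, node_alloc_arena, the one-slot thread cache) are illustrative assumptions, not the commit's actual helpers.

#include <pthread.h>
#include <stdlib.h>

/* Illustrative bookkeeping record for one huge allocation. */
typedef struct extent_node_s {
	void	*addr;	/* Start address of the huge allocation. */
	size_t	size;	/* Its size in bytes. */
} extent_node_t;

/* Old path: every node allocation serializes on one process-wide mutex. */
static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;

static extent_node_t *
node_alloc_base(void)
{
	extent_node_t *node;

	pthread_mutex_lock(&base_mtx);
	node = malloc(sizeof(*node));	/* Stand-in for base allocation. */
	pthread_mutex_unlock(&base_mtx);
	return (node);
}

/*
 * New path: serve the node from a per-thread cache when possible; no global
 * mutex is taken, which is why the caller's tsd must now reach huge_dalloc().
 */
static __thread extent_node_t *thread_cached_node;	/* One-slot "tcache". */

static extent_node_t *
node_alloc_arena(void)
{
	extent_node_t *node = thread_cached_node;

	if (node != NULL) {
		thread_cached_node = NULL;	/* Hit: no lock, no malloc. */
		return (node);
	}
	return (malloc(sizeof(*node)));	/* Stand-in for arena allocation. */
}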
commit f22214a29d (parent 8bb3198f72)
committed by Jason Evans
@@ -21,7 +21,7 @@ void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
 typedef void (huge_dalloc_junk_t)(void *, size_t);
 extern huge_dalloc_junk_t *huge_dalloc_junk;
 #endif
-void huge_dalloc(void *ptr);
+void huge_dalloc(tsd_t *tsd, void *ptr);
 size_t huge_salloc(const void *ptr);
 prof_tctx_t *huge_prof_tctx_get(const void *ptr);
 void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
@@ -938,7 +938,7 @@ idalloct(tsd_t *tsd, void *ptr, bool try_tcache)
 	if (chunk != ptr)
 		arena_dalloc(tsd, chunk, ptr, try_tcache);
 	else
-		huge_dalloc(ptr);
+		huge_dalloc(tsd, ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -952,7 +952,7 @@ isdalloct(tsd_t *tsd, void *ptr, size_t size, bool try_tcache)
 	if (chunk != ptr)
 		arena_sdalloc(tsd, chunk, ptr, size, try_tcache);
 	else
-		huge_dalloc(ptr);
+		huge_dalloc(tsd, ptr);
 }
 
 JEMALLOC_ALWAYS_INLINE void
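The unchanged condition in both hunks is how these paths tell huge pointers apart from small/large ones: the enclosing chunk base is computed by masking the pointer (typically via jemalloc's CHUNK_ADDR2BASE() macro just above the shown lines), and only a huge allocation sits exactly at a chunk boundary, so chunk == ptr selects the huge_dalloc() path. Below is a standalone sketch of that test, using an example chunk size rather than jemalloc's configured one.

#include <stdbool.h>
#include <stdint.h>

/* Example chunk size; jemalloc's actual value is configurable. */
#define EXAMPLE_CHUNK_SIZE	((uintptr_t)4 << 20)	/* 4 MiB */

/*
 * Analogue of CHUNK_ADDR2BASE(): clear the low bits of the address to find
 * the start of the chunk that contains ptr.
 */
static inline void *
chunk_addr2base(const void *ptr)
{
	return ((void *)((uintptr_t)ptr & ~(EXAMPLE_CHUNK_SIZE - 1)));
}

/*
 * Huge allocations are chunk-aligned, so their pointer equals the chunk base;
 * small and large allocations live at a non-zero offset inside a chunk.  This
 * reproduces the if (chunk != ptr) dispatch seen in idalloct()/isdalloct().
 */
static inline bool
ptr_is_huge(const void *ptr)
{
	return (chunk_addr2base(ptr) == ptr);
}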