Refactor huge_*() calls into arena internals.

Make redirects to the huge_*() API the arena code's responsibility,
since arenas now take responsibility for all allocation sizes.
Jason Evans
2015-02-12 14:06:37 -08:00
parent 1eaf3b6f34
commit 88fef7ceda
4 changed files with 238 additions and 227 deletions
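
Every hunk below applies the same refactoring: each inline i*() wrapper used to choose between the arena path and the huge path itself (by comparing the request against arena_maxclass, or the pointer against its chunk base), forwarding huge requests to huge_*() directly. After this change each wrapper makes one unconditional arena_*() call, and the arena function performs the redirect internally. A minimal self-contained sketch of the before/after shape, using toy names and a toy threshold rather than jemalloc's actual definitions:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for arena_maxclass, the arena allocator, and huge_malloc(). */
#define TOY_ARENA_MAXCLASS ((size_t)2 << 20)	/* 2 MiB, illustrative only */

static void *toy_arena_run_alloc(size_t size) { return (malloc(size)); }
static void *toy_huge_malloc(size_t size) { return (malloc(size)); }

/* Before: the caller-facing wrapper owned the size-class dispatch. */
static void *
ialloc_before(size_t size)
{
	if (size <= TOY_ARENA_MAXCLASS)
		return (toy_arena_run_alloc(size));
	return (toy_huge_malloc(size));
}

/* After: the arena function absorbs the huge_*() redirect... */
static void *
toy_arena_malloc(size_t size)
{
	if (size <= TOY_ARENA_MAXCLASS)
		return (toy_arena_run_alloc(size));
	return (toy_huge_malloc(size));	/* redirect now lives in arena code */
}

/* ...so the wrapper collapses to a single unconditional call. */
static void *
ialloc_after(size_t size)
{
	return (toy_arena_malloc(size));
}

int
main(void)
{
	void *a = ialloc_before((size_t)4 << 20);	/* huge path, old dispatch */
	void *b = ialloc_after((size_t)4 << 20);	/* huge path, new dispatch */
	printf("%p %p\n", a, b);
	free(a);
	free(b);
	return (0);
}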

@@ -823,18 +823,10 @@ bool ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 JEMALLOC_ALWAYS_INLINE arena_t *
 iaalloc(const void *ptr)
 {
-	arena_t *arena;
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena = arena_aalloc(ptr);
-	else
-		arena = huge_aalloc(ptr);
-
-	return (arena);
+	return (arena_aalloc(ptr));
 }
 
 /*
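
The chunk != ptr test deleted above (and again in isalloc() and isdalloct() below) is jemalloc's pointer-classification trick: chunks are chunksize-aligned and a huge allocation hands out its chunk base directly, so a pointer that equals its own chunk base must be huge, while any interior pointer belongs to an arena-managed region. The test does not disappear; it moves into arena_aalloc() and its siblings. A self-contained sketch of the predicate, with a toy chunk size and hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

/* Toy chunk size; jemalloc's chunksize is configurable at build time. */
#define TOY_CHUNK ((uintptr_t)4 << 20)	/* 4 MiB */

/* Mirrors what jemalloc's CHUNK_ADDR2BASE() macro computes: round the
 * address down to the base of its enclosing chunk. */
static const void *
toy_chunk_addr2base(const void *ptr)
{
	return ((const void *)((uintptr_t)ptr & ~(TOY_CHUNK - 1)));
}

/* Huge allocations return the chunk base itself; arena allocations
 * return interior addresses (the chunk header occupies the base). */
static int
toy_is_huge(const void *ptr)
{
	return (toy_chunk_addr2base(ptr) == ptr);
}

int
main(void)
{
	/* Fabricated addresses, purely to exercise the predicate. */
	const void *huge_ptr = (const void *)(TOY_CHUNK * 3);
	const void *arena_ptr = (const void *)(TOY_CHUNK * 3 + 4096);

	printf("huge: %d, arena: %d\n", toy_is_huge(huge_ptr),
	    toy_is_huge(arena_ptr));	/* prints "huge: 1, arena: 0" */
	return (0);
}
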
@@ -845,20 +837,12 @@ iaalloc(const void *ptr)
 JEMALLOC_ALWAYS_INLINE size_t
 isalloc(const void *ptr, bool demote)
 {
-	size_t ret;
-	arena_chunk_t *chunk;
 
 	assert(ptr != NULL);
 	/* Demotion only makes sense if config_prof is true. */
 	assert(config_prof || !demote);
 
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		ret = arena_salloc(ptr, demote);
-	else
-		ret = huge_salloc(ptr);
-
-	return (ret);
+	return (arena_salloc(ptr, demote));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -869,10 +853,7 @@ iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata
 
 	assert(size != 0);
 
-	if (likely(size <= arena_maxclass))
-		ret = arena_malloc(tsd, arena, size, zero, tcache);
-	else
-		ret = huge_malloc(tsd, arena, size, zero, tcache);
+	ret = arena_malloc(tsd, arena, size, zero, tcache);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
 		    config_prof));
@@ -917,21 +898,7 @@ ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
 	assert(usize != 0);
 	assert(usize == sa2u(usize, alignment));
 
-	if (usize <= SMALL_MAXCLASS && alignment < PAGE)
-		ret = arena_malloc(tsd, arena, usize, zero, tcache);
-	else {
-		if (likely(usize <= arena_maxclass)) {
-			arena = arena_choose(tsd, arena);
-			if (unlikely(arena == NULL))
-				return (NULL);
-			ret = arena_palloc(arena, usize, alignment, zero);
-		} else if (likely(alignment <= chunksize))
-			ret = huge_malloc(tsd, arena, usize, zero, tcache);
-		else {
-			ret = huge_palloc(tsd, arena, usize, alignment, zero,
-			    tcache);
-		}
-	}
+	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
 	if (config_stats && is_metadata && likely(ret != NULL)) {
 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
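
This hunk is the one place where the wrapper's dispatch was genuinely four-way: small requests whose size class already guarantees the alignment, page-aligned arena runs (which first required arena_choose()), huge allocations whose inherent chunksize alignment satisfies the request for free, and explicitly aligned huge allocations. The new arena_palloc() signature gains tsd and tcache evidently so it can make all four decisions itself. A compact sketch of that decision ladder, with toy thresholds and path labels standing in for real allocator calls:

#include <stddef.h>
#include <stdio.h>

/* Toy thresholds standing in for SMALL_MAXCLASS, arena_maxclass, PAGE,
 * and chunksize; the values are illustrative only. */
#define TOY_SMALL_MAX ((size_t)14 << 10)	/* 14 KiB */
#define TOY_ARENA_MAX ((size_t)2 << 20)		/* 2 MiB */
#define TOY_PAGE ((size_t)4 << 10)		/* 4 KiB */
#define TOY_CHUNK ((size_t)4 << 20)		/* 4 MiB */

/* Sketch of the four-way dispatch the new arena_palloc() owns; it
 * returns the name of the path a real allocator would take. */
static const char *
toy_arena_palloc_path(size_t usize, size_t alignment)
{
	if (usize <= TOY_SMALL_MAX && alignment < TOY_PAGE)
		return ("small: size class already implies the alignment");
	if (usize <= TOY_ARENA_MAX)
		return ("arena run: choose an arena, carve an aligned run");
	if (alignment <= TOY_CHUNK)
		return ("huge: chunk alignment satisfies the request for free");
	return ("huge aligned: allocate with the explicit alignment");
}

int
main(void)
{
	printf("%s\n", toy_arena_palloc_path(64, 8));
	printf("%s\n", toy_arena_palloc_path((size_t)1 << 20, (size_t)8 << 10));
	printf("%s\n", toy_arena_palloc_path((size_t)8 << 20, (size_t)1 << 20));
	printf("%s\n", toy_arena_palloc_path((size_t)8 << 20, (size_t)8 << 20));
	return (0);
}
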
@@ -1033,15 +1000,8 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
 JEMALLOC_ALWAYS_INLINE void
 isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
 {
-	arena_chunk_t *chunk;
 
-	assert(ptr != NULL);
-
-	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-	if (likely(chunk != ptr))
-		arena_sdalloc(tsd, chunk, ptr, size, tcache);
-	else
-		huge_dalloc(tsd, ptr, tcache);
+	arena_sdalloc(tsd, ptr, size, tcache);
 }
 
 JEMALLOC_ALWAYS_INLINE void
@@ -1104,13 +1064,8 @@ iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
 		    zero, tcache, arena));
 	}
 
-	if (likely(size <= arena_maxclass)) {
-		return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, tcache));
-	} else {
-		return (huge_ralloc(tsd, arena, ptr, oldsize, size, 0,
-		    alignment, zero, tcache));
-	}
+	return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
+	    tcache));
 }
 
 JEMALLOC_ALWAYS_INLINE void *
@@ -1136,10 +1091,7 @@ ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
 		return (true);
 	}
 
-	if (likely(size <= arena_maxclass))
-		return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
-	else
-		return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
 }
 
 #endif