From 38d9210c464c4ad49655a4da6bc84ea4fbec83d2 Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Wed, 23 Mar 2011 00:37:29 -0700
Subject: [PATCH] Fix error detection for ipalloc() when profiling.

sa2u() returns 0 on overflow, but the profiling code was blindly
calling sa2u() and allowing the error to silently propagate, ultimately
ending in a later assertion failure. Refactor all ipalloc() callers to
call sa2u(), check for overflow before calling ipalloc(), and pass
usize rather than size. This allows ipalloc() to avoid calling sa2u()
in the common case.
---
 .../jemalloc/internal/jemalloc_internal.h.in | 59 +++++++++++-------
 jemalloc/src/arena.c                         | 19 +++---
 jemalloc/src/ckh.c                           | 28 ++++++---
 jemalloc/src/jemalloc.c                      | 61 +++++++++++--------
 4 files changed, 105 insertions(+), 62 deletions(-)

diff --git a/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in b/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
index f82385d0..254adb6d 100644
--- a/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/jemalloc/include/jemalloc/internal/jemalloc_internal.h.in
@@ -589,7 +589,7 @@ thread_allocated_get(void)
 #ifndef JEMALLOC_ENABLE_INLINE
 void *imalloc(size_t size);
 void *icalloc(size_t size);
-void *ipalloc(size_t size, size_t alignment, bool zero);
+void *ipalloc(size_t usize, size_t alignment, bool zero);
 size_t isalloc(const void *ptr);
 # ifdef JEMALLOC_IVSALLOC
 size_t ivsalloc(const void *ptr);
@@ -623,28 +623,39 @@ icalloc(size_t size)
 }
 
 JEMALLOC_INLINE void *
-ipalloc(size_t size, size_t alignment, bool zero)
+ipalloc(size_t usize, size_t alignment, bool zero)
 {
 	void *ret;
-	size_t usize;
-	size_t run_size
-# ifdef JEMALLOC_CC_SILENCE
-	    = 0
-# endif
-	    ;
 
-	usize = sa2u(size, alignment, &run_size);
-	if (usize == 0)
-		return (NULL);
+	assert(usize != 0);
+	assert(usize == sa2u(usize, alignment, NULL));
+
 	if (usize <= arena_maxclass && alignment <= PAGE_SIZE)
 		ret = arena_malloc(usize, zero);
-	else if (run_size <= arena_maxclass) {
-		ret = arena_palloc(choose_arena(), usize, run_size, alignment,
-		    zero);
-	} else if (alignment <= chunksize)
-		ret = huge_malloc(usize, zero);
-	else
-		ret = huge_palloc(usize, alignment, zero);
+	else {
+		size_t run_size
+#ifdef JEMALLOC_CC_SILENCE
+		    = 0
+#endif
+		    ;
+
+		/*
+		 * Ideally we would only ever call sa2u() once per aligned
+		 * allocation request, and the caller of this function has
+		 * already done so once. However, it's rather burdensome to
+		 * require every caller to pass in run_size, especially given
+		 * that it's only relevant to large allocations. Therefore,
+		 * just call it again here in order to get run_size.
+		 */
+		sa2u(usize, alignment, &run_size);
+		if (run_size <= arena_maxclass) {
+			ret = arena_palloc(choose_arena(), usize, run_size,
+			    alignment, zero);
+		} else if (alignment <= chunksize)
+			ret = huge_malloc(usize, zero);
+		else
+			ret = huge_palloc(usize, alignment, zero);
+	}
 
 	assert(((uintptr_t)ret & (alignment - 1)) == 0);
 	return (ret);
@@ -715,7 +726,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
 
 	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
 	    != 0) {
-		size_t copysize;
+		size_t usize, copysize;
 
 		/*
 		 * Existing object alignment is inadquate; allocate new space
@@ -723,12 +734,18 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
 		 */
 		if (no_move)
 			return (NULL);
-		ret = ipalloc(size + extra, alignment, zero);
+		usize = sa2u(size + extra, alignment, NULL);
+		if (usize == 0)
+			return (NULL);
+		ret = ipalloc(usize, alignment, zero);
 		if (ret == NULL) {
 			if (extra == 0)
 				return (NULL);
 			/* Try again, without extra this time. */
-			ret = ipalloc(size, alignment, zero);
+			usize = sa2u(size, alignment, NULL);
+			if (usize == 0)
+				return (NULL);
+			ret = ipalloc(usize, alignment, zero);
 			if (ret == NULL)
 				return (NULL);
 		}
diff --git a/jemalloc/src/arena.c b/jemalloc/src/arena.c
index 0693f360..1954da97 100644
--- a/jemalloc/src/arena.c
+++ b/jemalloc/src/arena.c
@@ -2165,24 +2165,29 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	if (ret != NULL)
 		return (ret);
 
-
 	/*
 	 * size and oldsize are different enough that we need to move the
 	 * object. In that case, fall back to allocating new space and
 	 * copying.
 	 */
-	if (alignment != 0)
-		ret = ipalloc(size + extra, alignment, zero);
-	else
+	if (alignment != 0) {
+		size_t usize = sa2u(size + extra, alignment, NULL);
+		if (usize == 0)
+			return (NULL);
+		ret = ipalloc(usize, alignment, zero);
+	} else
 		ret = arena_malloc(size + extra, zero);
 
 	if (ret == NULL) {
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
-		if (alignment != 0)
-			ret = ipalloc(size, alignment, zero);
-		else
+		if (alignment != 0) {
+			size_t usize = sa2u(size, alignment, NULL);
+			if (usize == 0)
+				return (NULL);
+			ret = ipalloc(usize, alignment, zero);
+		} else
 			ret = arena_malloc(size, zero);
 
 		if (ret == NULL)
diff --git a/jemalloc/src/ckh.c b/jemalloc/src/ckh.c
index 22319ab1..143b5b5f 100644
--- a/jemalloc/src/ckh.c
+++ b/jemalloc/src/ckh.c
@@ -262,9 +262,15 @@ ckh_grow(ckh_t *ckh)
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
 	while (true) {
+		size_t usize;
+
 		lg_curcells++;
-		tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_curcells,
-		    ZU(1) << LG_CACHELINE, true);
+		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
+		if (usize == 0) {
+			ret = true;
+			goto RETURN;
+		}
+		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
 		if (tab == NULL) {
 			ret = true;
 			goto RETURN;
@@ -295,7 +301,7 @@ static void
 ckh_shrink(ckh_t *ckh)
 {
 	ckhc_t *tab, *ttab;
-	size_t lg_curcells;
+	size_t lg_curcells, usize;
 	unsigned lg_prevbuckets;
 
 	/*
@@ -304,8 +310,10 @@ ckh_shrink(ckh_t *ckh)
 	 */
 	lg_prevbuckets = ckh->lg_curbuckets;
 	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
-	tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_curcells,
-	    ZU(1) << LG_CACHELINE, true);
+	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE, NULL);
+	if (usize == 0)
+		return;
+	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
 	if (tab == NULL) {
 		/*
 		 * An OOM error isn't worth propagating, since it doesn't
@@ -340,7 +348,7 @@ bool
 ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 {
 	bool ret;
-	size_t mincells;
+	size_t mincells, usize;
 	unsigned lg_mincells;
 
 	assert(minitems > 0);
@@ -375,8 +383,12 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
 	ckh->hash = hash;
 	ckh->keycomp = keycomp;
 
-	ckh->tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_mincells,
-	    (ZU(1) << LG_CACHELINE), true);
+	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE, NULL);
+	if (usize == 0) {
+		ret = true;
+		goto RETURN;
+	}
+	ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
 	if (ckh->tab == NULL) {
 		ret = true;
 		goto RETURN;
diff --git a/jemalloc/src/jemalloc.c b/jemalloc/src/jemalloc.c
index 1b8a278e..e2875169 100644
--- a/jemalloc/src/jemalloc.c
+++ b/jemalloc/src/jemalloc.c
@@ -993,14 +993,12 @@ int
 JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
 {
 	int ret;
-	void *result;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
 	size_t usize
-# ifdef JEMALLOC_CC_SILENCE
+#ifdef JEMALLOC_CC_SILENCE
 	    = 0
-# endif
-	    ;
 #endif
+	    ;
+	void *result;
 #ifdef JEMALLOC_PROF
 	prof_thr_cnt_t *cnt
 # ifdef JEMALLOC_CC_SILENCE
@@ -1050,34 +1048,37 @@ JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
 			goto RETURN;
 		}
 
+		usize = sa2u(size, alignment, NULL);
+		if (usize == 0) {
+			result = NULL;
+			ret = ENOMEM;
+			goto RETURN;
+		}
+
 #ifdef JEMALLOC_PROF
 		if (opt_prof) {
-			usize = sa2u(size, alignment, NULL);
 			if ((cnt = prof_alloc_prep(usize)) == NULL) {
 				result = NULL;
 				ret = EINVAL;
 			} else {
 				if (prof_promote && (uintptr_t)cnt !=
 				    (uintptr_t)1U && usize <= small_maxclass) {
-					result = ipalloc(small_maxclass+1,
-					    alignment, false);
+					assert(sa2u(small_maxclass+1,
+					    alignment, NULL) != 0);
+					result = ipalloc(sa2u(small_maxclass+1,
+					    alignment, NULL), alignment, false);
 					if (result != NULL) {
 						arena_prof_promoted(result,
 						    usize);
 					}
 				} else {
-					result = ipalloc(size, alignment,
+					result = ipalloc(usize, alignment,
 					    false);
 				}
 			}
 		} else
 #endif
-		{
-#ifdef JEMALLOC_STATS
-			usize = sa2u(size, alignment, NULL);
-#endif
-			result = ipalloc(size, alignment, false);
-		}
+			result = ipalloc(usize, alignment, false);
 	}
 
 	if (result == NULL) {
@@ -1531,15 +1532,18 @@ JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
 }
 
 JEMALLOC_INLINE void *
-iallocm(size_t size, size_t alignment, bool zero)
+iallocm(size_t usize, size_t alignment, bool zero)
 {
 
+	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, alignment,
+	    NULL)));
+
 	if (alignment != 0)
-		return (ipalloc(size, alignment, zero));
+		return (ipalloc(usize, alignment, zero));
 	else if (zero)
-		return (icalloc(size));
+		return (icalloc(usize));
 	else
-		return (imalloc(size));
+		return (imalloc(usize));
 }
 
 JEMALLOC_ATTR(nonnull(1))
@@ -1562,20 +1566,27 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
 	if (malloc_init())
 		goto OOM;
 
+	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
+	    NULL);
+	if (usize == 0)
+		goto OOM;
+
 #ifdef JEMALLOC_PROF
 	if (opt_prof) {
-		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
-		    NULL);
 		if ((cnt = prof_alloc_prep(usize)) == NULL)
 			goto OOM;
 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
 		    small_maxclass) {
-			p = iallocm(small_maxclass+1, alignment, zero);
+			size_t usize_promoted = (alignment == 0) ?
+			    s2u(small_maxclass+1) : sa2u(small_maxclass+1,
+			    alignment, NULL);
+			assert(usize_promoted != 0);
+			p = iallocm(usize_promoted, alignment, zero);
 			if (p == NULL)
 				goto OOM;
 			arena_prof_promoted(p, usize);
 		} else {
-			p = iallocm(size, alignment, zero);
+			p = iallocm(usize, alignment, zero);
 			if (p == NULL)
 				goto OOM;
 		}
@@ -1585,15 +1596,13 @@ JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
 	} else
 #endif
 	{
-		p = iallocm(size, alignment, zero);
+		p = iallocm(usize, alignment, zero);
 		if (p == NULL)
 			goto OOM;
 #ifndef JEMALLOC_STATS
 		if (rsize != NULL)
 #endif
 		{
-			usize = (alignment == 0) ? s2u(size) : sa2u(size,
-			    alignment, NULL);
 #ifdef JEMALLOC_STATS
 			if (rsize != NULL)
 #endif
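
Note: the snippet below is not part of the patch. It is a minimal sketch of the caller-side convention the change establishes, written against the jemalloc-internal functions shown in the diff (sa2u(), ipalloc()); the wrapper name is hypothetical. sa2u() returns 0 when the size/alignment combination overflows, so a caller must detect that before handing the computed usize to ipalloc(), which now asserts that it received a nonzero, already-rounded usize.

/*
 * Hypothetical helper illustrating the pattern applied throughout this
 * patch: compute usize up front with sa2u(), treat 0 as overflow, and
 * only then call ipalloc() with usize (not the raw request size).
 */
static void *
aligned_alloc_checked(size_t size, size_t alignment, bool zero)
{
	size_t usize;

	usize = sa2u(size, alignment, NULL);
	if (usize == 0)
		return (NULL);	/* size/alignment overflowed; fail early. */
	return (ipalloc(usize, alignment, zero));
}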