diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 6ab0ae71..bfb0b3cf 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -488,7 +488,8 @@ void	arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
 void	*arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
 size_t	arena_salloc(const void *ptr, bool demote);
 void	arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache);
-void	arena_sdalloc(arena_chunk_t *chunk, void *ptr, size_t size, bool try_tcache);
+void	arena_sdalloc(arena_chunk_t *chunk, void *ptr, size_t size,
+    bool try_tcache);
 #endif
 
 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
@@ -539,7 +540,7 @@ small_size2bin(size_t size)
 {
 
 	assert(size > 0);
-	if (size <= LOOKUP_MAXCLASS)
+	if (likely(size <= LOOKUP_MAXCLASS))
 		return (small_size2bin_lookup(size));
 	else
 		return (small_size2bin_compute(size));
@@ -627,7 +628,7 @@ small_s2u(size_t size)
 {
 
 	assert(size > 0);
-	if (size <= LOOKUP_MAXCLASS)
+	if (likely(size <= LOOKUP_MAXCLASS))
 		return (small_s2u_lookup(size));
 	else
 		return (small_s2u_compute(size));
@@ -864,7 +865,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
 
 	cassert(config_prof);
 
-	if (prof_interval == 0)
+	if (likely(prof_interval == 0))
 		return (false);
 	return (arena_prof_accum_impl(arena, accumbytes));
 }
@@ -875,7 +876,7 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
 
 	cassert(config_prof);
 
-	if (prof_interval == 0)
+	if (likely(prof_interval == 0))
 		return (false);
 
 	{
@@ -995,8 +996,8 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
 		    SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31)
 		};
 
-		if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) +
-		    2)) {
+		if (likely(interval <= ((sizeof(interval_invs) /
+		    sizeof(unsigned)) + 2))) {
 			regind = (diff * interval_invs[interval - 3]) >>
 			    SIZE_INV_SHIFT;
 		} else
@@ -1025,7 +1026,7 @@ arena_prof_tctx_get(const void *ptr)
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	mapbits = arena_mapbits_get(chunk, pageind);
 	assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
-	if ((mapbits & CHUNK_MAP_LARGE) == 0)
+	if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
 		ret = (prof_tctx_t *)(uintptr_t)1U;
 	else
 		ret = arena_miscelm_get(chunk, pageind)->prof_tctx;
@@ -1047,7 +1048,7 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
 
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-	if (arena_mapbits_large_get(chunk, pageind) != 0)
+	if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0))
 		arena_miscelm_get(chunk, pageind)->prof_tctx = tctx;
 }
 
@@ -1059,8 +1060,9 @@ arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
 	assert(size != 0);
 	assert(size <= arena_maxclass);
 
-	if (size <= SMALL_MAXCLASS) {
-		if (try_tcache && (tcache = tcache_get(true)) != NULL)
+	if (likely(size <= SMALL_MAXCLASS)) {
+		if (likely(try_tcache) && likely((tcache = tcache_get(true)) !=
+		    NULL))
 			return (tcache_alloc_small(tcache, size, zero));
 		else {
 			return (arena_malloc_small(choose_arena(arena), size,
@@ -1071,8 +1073,8 @@ arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache)
 		 * Initialize tcache after checking size in order to avoid
 		 * infinite recursion during tcache initialization.
 		 */
-		if (try_tcache && size <= tcache_maxclass && (tcache =
-		    tcache_get(true)) != NULL)
+		if (try_tcache && size <= tcache_maxclass && likely((tcache =
+		    tcache_get(true)) != NULL))
 			return (tcache_alloc_large(tcache, size, zero));
 		else {
 			return (arena_malloc_large(choose_arena(arena), size,
@@ -1096,8 +1098,8 @@ arena_salloc(const void *ptr, bool demote)
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
 	binind = arena_mapbits_binind_get(chunk, pageind);
-	if (binind == BININD_INVALID || (config_prof && demote == false &&
-	    arena_mapbits_large_get(chunk, pageind) != 0)) {
+	if (unlikely(binind == BININD_INVALID || (config_prof && demote == false
+	    && arena_mapbits_large_get(chunk, pageind) != 0))) {
 		/*
 		 * Large allocation. In the common case (demote == true), and
 		 * as this is an inline function, most callers will only end up
@@ -1137,10 +1139,12 @@ arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
 	mapbits = arena_mapbits_get(chunk, pageind);
 	assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
-	if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+	if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
 		/* Small allocation. */
-		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
-			size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+		if (likely(try_tcache) && likely((tcache = tcache_get(false)) !=
+		    NULL)) {
+			size_t binind = arena_ptr_small_binind_get(ptr,
+			    mapbits);
 			tcache_dalloc_small(tcache, ptr, binind);
 		} else
 			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
@@ -1149,8 +1153,8 @@ arena_dalloc(arena_chunk_t *chunk, void *ptr, bool try_tcache)
 
 		assert(((uintptr_t)ptr & PAGE_MASK) == 0);
 
-		if (try_tcache && size <= tcache_maxclass && (tcache =
-		    tcache_get(false)) != NULL) {
+		if (try_tcache && size <= tcache_maxclass && likely((tcache =
+		    tcache_get(false)) != NULL)) {
 			tcache_dalloc_large(tcache, ptr, size);
 		} else
 			arena_dalloc_large(chunk->arena, chunk, ptr);
@@ -1165,13 +1169,15 @@ arena_sdalloc(arena_chunk_t *chunk, void *ptr, size_t size, bool try_tcache)
 	assert(ptr != NULL);
 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
 
-	if (size < PAGE) {
+	if (likely(size <= SMALL_MAXCLASS)) {
 		/* Small allocation. */
-		if (try_tcache && (tcache = tcache_get(false)) != NULL) {
+		if (likely(try_tcache) && likely((tcache = tcache_get(false)) !=
+		    NULL)) {
 			size_t binind = small_size2bin(size);
 			tcache_dalloc_small(tcache, ptr, binind);
 		} else {
-			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+			size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
+			    LG_PAGE;
 			arena_dalloc_small(chunk->arena, chunk, ptr, pageind);
 		}
 	} else {
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 81d46fc3..a380a414 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -814,7 +814,7 @@ JEMALLOC_ALWAYS_INLINE void
 iqalloc(void *ptr, bool try_tcache)
 {
 
-	if (config_fill && opt_quarantine)
+	if (config_fill && unlikely(opt_quarantine))
 		quarantine(ptr);
 	else
 		idalloct(ptr, try_tcache);
@@ -824,7 +824,7 @@ JEMALLOC_ALWAYS_INLINE void
 isqalloc(void *ptr, size_t size, bool try_tcache)
 {
 
-	if (config_fill && opt_quarantine)
+	if (config_fill && unlikely(opt_quarantine))
 		quarantine(ptr);
 	else
 		isdalloct(ptr, size, try_tcache);
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 920ec63f..a1e7ac5e 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -400,7 +400,8 @@ prof_alloc_prep(size_t usize, bool update)
 
 	assert(usize == s2u(usize));
 
-	if (!opt_prof_active || prof_sample_accum_update(usize, update, &tdata))
+	if (!opt_prof_active || likely(prof_sample_accum_update(usize, update,
+	    &tdata)))
 		ret = (prof_tctx_t *)(uintptr_t)1U;
 	else {
 		bt_init(&bt, tdata->vec);
@@ -419,7 +420,7 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
 	assert(ptr != NULL);
 	assert(usize == isalloc(ptr, true));
 
-	if ((uintptr_t)tctx > (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
 		prof_malloc_sample_object(ptr, usize, tctx);
 	else
 		prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
@@ -447,9 +448,9 @@ prof_realloc(const void *ptr, size_t usize, prof_tctx_t *tctx, bool updated,
 		}
 	}
 
-	if ((uintptr_t)old_tctx > (uintptr_t)1U)
+	if (unlikely((uintptr_t)old_tctx > (uintptr_t)1U))
 		prof_free_sampled_object(old_usize, old_tctx);
-	if ((uintptr_t)tctx > (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
 		prof_malloc_sample_object(ptr, usize, tctx);
 	else
 		prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
@@ -463,7 +464,7 @@ prof_free(const void *ptr, size_t usize)
 	cassert(config_prof);
 	assert(usize == isalloc(ptr, true));
 
-	if ((uintptr_t)tctx > (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
 		prof_free_sampled_object(usize, tctx);
 }
 #endif
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index 292ce461..c9d723aa 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -236,7 +236,7 @@ tcache_event(tcache_t *tcache)
 
 	tcache->ev_cnt++;
 	assert(tcache->ev_cnt <= TCACHE_GC_INCR);
-	if (tcache->ev_cnt == TCACHE_GC_INCR)
+	if (unlikely(tcache->ev_cnt == TCACHE_GC_INCR))
 		tcache_event_hard(tcache);
 }
 
@@ -245,12 +245,12 @@ tcache_alloc_easy(tcache_bin_t *tbin)
 {
 	void *ret;
 
-	if (tbin->ncached == 0) {
+	if (unlikely(tbin->ncached == 0)) {
 		tbin->low_water = -1;
 		return (NULL);
 	}
 	tbin->ncached--;
-	if ((int)tbin->ncached < tbin->low_water)
+	if (unlikely((int)tbin->ncached < tbin->low_water))
 		tbin->low_water = tbin->ncached;
 	ret = tbin->avail[tbin->ncached];
 	return (ret);
@@ -268,23 +268,23 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
 	tbin = &tcache->tbins[binind];
 	size = small_bin2size(binind);
 	ret = tcache_alloc_easy(tbin);
-	if (ret == NULL) {
+	if (unlikely(ret == NULL)) {
 		ret = tcache_alloc_small_hard(tcache, tbin, binind);
 		if (ret == NULL)
 			return (NULL);
 	}
 	assert(tcache_salloc(ret) == size);
 
-	if (zero == false) {
+	if (likely(zero == false)) {
 		if (config_fill) {
-			if (opt_junk) {
+			if (unlikely(opt_junk)) {
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
-			} else if (opt_zero)
+			} else if (unlikely(opt_zero))
 				memset(ret, 0, size);
 		}
 	} else {
-		if (config_fill && opt_junk) {
+		if (config_fill && unlikely(opt_junk)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
@@ -312,7 +312,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 	assert(binind < nhbins);
 	tbin = &tcache->tbins[binind];
 	ret = tcache_alloc_easy(tbin);
-	if (ret == NULL) {
+	if (unlikely(ret == NULL)) {
 		/*
 		 * Only allocate one large object at a time, because it's quite
 		 * expensive to create one and not use it.
@@ -329,11 +329,11 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
 			arena_mapbits_large_binind_set(chunk, pageind,
 			    BININD_INVALID);
 		}
-		if (zero == false) {
+		if (likely(zero == false)) {
 			if (config_fill) {
-				if (opt_junk)
+				if (unlikely(opt_junk))
 					memset(ret, 0xa5, size);
-				else if (opt_zero)
+				else if (unlikely(opt_zero))
 					memset(ret, 0, size);
 			}
 		} else
@@ -357,12 +357,12 @@ tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind)
 
 	assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
 
-	if (config_fill && opt_junk)
+	if (config_fill && unlikely(opt_junk))
 		arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
-	if (tbin->ncached == tbin_info->ncached_max) {
+	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
 		tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >>
 		    1), tcache);
 	}
@@ -386,12 +386,12 @@ tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size)
 
 	binind = NBINS + (size >> LG_PAGE) - 1;
 
-	if (config_fill && opt_junk)
+	if (config_fill && unlikely(opt_junk))
 		memset(ptr, 0x5a, size);
 
 	tbin = &tcache->tbins[binind];
 	tbin_info = &tcache_bin_info[binind];
-	if (tbin->ncached == tbin_info->ncached_max) {
+	if (unlikely(tbin->ncached == tbin_info->ncached_max)) {
 		tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >>
 		    1), tcache);
 	}
diff --git a/src/arena.c b/src/arena.c
index 8d34cf60..35d792a2 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -1365,7 +1365,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
 			ptr = arena_bin_malloc_hard(arena, bin);
 		if (ptr == NULL)
 			break;
-		if (config_fill && opt_junk) {
+		if (config_fill && unlikely(opt_junk)) {
 			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
 			    true);
 		}
@@ -1519,15 +1519,15 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 
 	if (zero == false) {
 		if (config_fill) {
-			if (opt_junk) {
+			if (unlikely(opt_junk)) {
 				arena_alloc_junk_small(ret,
 				    &arena_bin_info[binind], false);
-			} else if (opt_zero)
+			} else if (unlikely(opt_zero))
 				memset(ret, 0, size);
 		}
 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 	} else {
-		if (config_fill && opt_junk) {
+		if (config_fill && unlikely(opt_junk)) {
 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
 			    true);
 		}
@@ -1568,9 +1568,9 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
 
 	if (zero == false) {
 		if (config_fill) {
-			if (opt_junk)
+			if (unlikely(opt_junk))
 				memset(ret, 0xa5, size);
-			else if (opt_zero)
+			else if (unlikely(opt_zero))
 				memset(ret, 0, size);
 		}
 	}
@@ -1626,9 +1626,9 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 	malloc_mutex_unlock(&arena->lock);
 
 	if (config_fill && zero == false) {
-		if (opt_junk)
+		if (unlikely(opt_junk))
 			memset(ret, 0xa5, size);
-		else if (opt_zero)
+		else if (unlikely(opt_zero))
 			memset(ret, 0, size);
 	}
 	return (ret);
@@ -1771,7 +1771,7 @@ arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	if (config_fill || config_stats)
 		size = bin_info->reg_size;
 
-	if (config_fill && opt_junk)
+	if (config_fill && unlikely(opt_junk))
 		arena_dalloc_junk_small(ptr, bin_info);
 
 	arena_run_reg_dalloc(run, ptr);
@@ -1825,7 +1825,7 @@ static void
 arena_dalloc_junk_large(void *ptr, size_t usize)
 {
 
-	if (config_fill && opt_junk)
+	if (config_fill && unlikely(opt_junk))
 		memset(ptr, 0x5a, usize);
 }
 #ifdef JEMALLOC_JET
@@ -1967,7 +1967,7 @@ static void
 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
 {
 
-	if (config_fill && opt_junk) {
+	if (config_fill && unlikely(opt_junk)) {
 		memset((void *)((uintptr_t)ptr + usize), 0x5a,
 		    old_usize - usize);
 	}
@@ -2011,11 +2011,11 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
 		    oldsize, PAGE_CEILING(size),
 		    psize - PAGE_CEILING(size), zero);
 		if (config_fill && ret == false && zero == false) {
-			if (opt_junk) {
+			if (unlikely(opt_junk)) {
 				memset((void *)((uintptr_t)ptr + oldsize),
 				    0xa5, isalloc(ptr, config_prof) -
 				    oldsize);
-			} else if (opt_zero) {
+			} else if (unlikely(opt_zero)) {
 				memset((void *)((uintptr_t)ptr + oldsize),
 				    0, isalloc(ptr, config_prof) -
 				    oldsize);
@@ -2272,7 +2272,7 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
 	 * minimum alignment; without the padding, each redzone would have to
 	 * be twice as large in order to maintain alignment.
 	 */
-	if (config_fill && opt_redzone) {
+	if (config_fill && unlikely(opt_redzone)) {
 		size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) -
 		    1);
 		if (align_min <= REDZONE_MINSIZE) {
diff --git a/src/huge.c b/src/huge.c
index e7733093..0b7db7fc 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -62,9 +62,9 @@ huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
 	malloc_mutex_unlock(&huge_mtx);
 
 	if (config_fill && zero == false) {
-		if (opt_junk)
+		if (unlikely(opt_junk))
 			memset(ret, 0xa5, csize);
-		else if (opt_zero && is_zeroed == false)
+		else if (unlikely(opt_zero) && is_zeroed == false)
 			memset(ret, 0, csize);
 	}
 
@@ -141,7 +141,7 @@ static void
 huge_dalloc_junk(void *ptr, size_t usize)
 {
 
-	if (config_fill && have_dss && opt_junk) {
+	if (config_fill && have_dss && unlikely(opt_junk)) {
 		/*
 		 * Only bother junk filling if the chunk isn't about to be
 		 * unmapped.
diff --git a/src/jemalloc.c b/src/jemalloc.c
index f6be7514..dfb12666 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -87,7 +87,7 @@ typedef struct {
 
 #ifdef JEMALLOC_UTRACE
 #  define UTRACE(a, b, c) do {						\
-	if (opt_utrace) {						\
+	if (unlikely(opt_utrace)) {					\
 		int utrace_serrno = errno;				\
 		malloc_utrace_t ut;					\
 		ut.p = (a);						\
@@ -283,7 +283,7 @@ malloc_thread_init(void)
 	 * a best effort attempt at initializing its TSD by hooking all
 	 * allocation events.
 	 */
-	if (config_fill && opt_quarantine)
+	if (config_fill && unlikely(opt_quarantine))
 		quarantine_alloc_hook();
 }
 
@@ -397,13 +397,13 @@ malloc_conf_init(void)
 	 */
 	if (config_valgrind) {
 		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
-		if (config_fill && in_valgrind) {
+		if (config_fill && unlikely(in_valgrind)) {
 			opt_junk = false;
 			assert(opt_zero == false);
 			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
 			opt_redzone = true;
 		}
-		if (config_tcache && in_valgrind)
+		if (config_tcache && unlikely(in_valgrind))
 			opt_tcache = false;
 	}
 
@@ -887,7 +887,7 @@ imalloc_prof(size_t usize)
 	prof_tctx_t *tctx;
 
 	tctx = prof_alloc_prep(usize, true);
-	if ((uintptr_t)tctx != (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = imalloc_prof_sample(usize, tctx);
 	else
 		p = imalloc(usize);
@@ -912,7 +912,7 @@ imalloc_body(size_t size, size_t *usize)
 		return (imalloc_prof(*usize));
 	}
 
-	if (config_stats || (unlikely(config_valgrind && in_valgrind)))
+	if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 		*usize = s2u(size);
 	return (imalloc(size));
 }
@@ -927,15 +927,15 @@ je_malloc(size_t size)
 		size = 1;
 
 	ret = imalloc_body(size, &usize);
-	if (ret == NULL) {
-		if (config_xmalloc && opt_xmalloc) {
+	if (unlikely(ret == NULL)) {
+		if (config_xmalloc && unlikely(opt_xmalloc)) {
 			malloc_write("<jemalloc>: Error in malloc(): "
 			    "out of memory\n");
 			abort();
 		}
 		set_errno(ENOMEM);
 	}
-	if (config_stats && ret != NULL) {
+	if (config_stats && likely(ret != NULL)) {
 		assert(usize == isalloc(ret, config_prof));
 		thread_allocated_tsd_get()->allocated += usize;
 	}
@@ -970,7 +970,7 @@ imemalign_prof(size_t alignment, size_t usize)
 	prof_tctx_t *tctx;
 
 	tctx = prof_alloc_prep(usize, true);
-	if ((uintptr_t)tctx != (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = imemalign_prof_sample(alignment, usize, tctx);
 	else
 		p = ipalloc(usize, alignment, false);
@@ -1001,9 +1001,9 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 			size = 1;
 
 		/* Make sure that alignment is a large enough power of 2. */
-		if (((alignment - 1) & alignment) != 0
-		    || (alignment < min_alignment)) {
-			if (config_xmalloc && opt_xmalloc) {
+		if (unlikely(((alignment - 1) & alignment) != 0
+		    || (alignment < min_alignment))) {
+			if (config_xmalloc && unlikely(opt_xmalloc)) {
 				malloc_write("<jemalloc>: Error allocating "
 				    "aligned memory: invalid alignment\n");
 				abort();
@@ -1014,7 +1014,7 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 		}
 
 		usize = sa2u(size, alignment);
-		if (usize == 0) {
+		if (unlikely(usize == 0)) {
 			result = NULL;
 			goto label_oom;
 		}
@@ -1023,14 +1023,14 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
 			result = imemalign_prof(alignment, usize);
 		else
 			result = ipalloc(usize, alignment, false);
-		if (result == NULL)
+		if (unlikely(result == NULL))
 			goto label_oom;
 	}
 
 	*memptr = result;
 	ret = 0;
 label_return:
-	if (config_stats && result != NULL) {
+	if (config_stats && likely(result != NULL)) {
 		assert(usize == isalloc(result, config_prof));
 		thread_allocated_tsd_get()->allocated += usize;
 	}
@@ -1038,7 +1038,7 @@ label_return:
 	return (ret);
 label_oom:
 	assert(result == NULL);
-	if (config_xmalloc && opt_xmalloc) {
+	if (config_xmalloc && unlikely(opt_xmalloc)) {
 		malloc_write("<jemalloc>: Error allocating aligned memory: "
 		    "out of memory\n");
 		abort();
@@ -1062,7 +1062,7 @@ je_aligned_alloc(size_t alignment, size_t size)
 	void *ret;
 	int err;
 
-	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
+	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
 		ret = NULL;
 		set_errno(err);
 	}
@@ -1096,7 +1096,7 @@ icalloc_prof(size_t usize)
 	prof_tctx_t *tctx;
 
 	tctx = prof_alloc_prep(usize, true);
-	if ((uintptr_t)tctx != (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = icalloc_prof_sample(usize, tctx);
 	else
 		p = icalloc(usize);
@@ -1123,7 +1123,7 @@ je_calloc(size_t num, size_t size)
 	}
 
 	num_size = num * size;
-	if (num_size == 0) {
+	if (unlikely(num_size == 0)) {
 		if (num == 0 || size == 0)
 			num_size = 1;
 		else {
@@ -1135,8 +1135,8 @@ je_calloc(size_t num, size_t size)
 	 * overflow during multiplication if neither operand uses any of the
 	 * most significant half of the bits in a size_t.
 	 */
-	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
-	    && (num_size / size != num)) {
+	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
+	    2))) && (num_size / size != num))) {
 		/* size_t overflow. */
 		ret = NULL;
 		goto label_return;
@@ -1146,21 +1146,21 @@ je_calloc(size_t num, size_t size)
 		usize = s2u(num_size);
 		ret = icalloc_prof(usize);
 	} else {
-		if (config_stats || unlikely(config_valgrind && in_valgrind))
+		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 			usize = s2u(num_size);
 		ret = icalloc(num_size);
 	}
 
 label_return:
-	if (ret == NULL) {
-		if (config_xmalloc && opt_xmalloc) {
+	if (unlikely(ret == NULL)) {
+		if (config_xmalloc && unlikely(opt_xmalloc)) {
 			malloc_write("<jemalloc>: Error in calloc(): out of "
 			    "memory\n");
 			abort();
 		}
 		set_errno(ENOMEM);
 	}
-	if (config_stats && ret != NULL) {
+	if (config_stats && likely(ret != NULL)) {
 		assert(usize == isalloc(ret, config_prof));
 		thread_allocated_tsd_get()->allocated += usize;
 	}
@@ -1195,7 +1195,7 @@ irealloc_prof(void *oldptr, size_t old_usize, size_t usize)
 
 	old_tctx = prof_tctx_get(oldptr);
 	tctx = prof_alloc_prep(usize, true);
-	if ((uintptr_t)tctx != (uintptr_t)1U)
+	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
 		p = irealloc_prof_sample(oldptr, usize, tctx);
 	else
 		p = iralloc(oldptr, usize, 0, false);
@@ -1222,7 +1222,7 @@ ifree(void *ptr, bool try_tcache)
 	usize = isalloc(ptr, config_prof);
 	if (config_stats)
 		thread_allocated_tsd_get()->deallocated += usize;
-	if (unlikely(config_valgrind && in_valgrind))
+	if (config_valgrind && unlikely(in_valgrind))
 		rzsize = p2rz(ptr);
 	iqalloc(ptr, try_tcache);
 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
@@ -1240,7 +1240,7 @@ isfree(void *ptr, size_t usize, bool try_tcache)
 		prof_free(ptr, usize);
 	if (config_stats)
 		thread_allocated_tsd_get()->deallocated += usize;
-	if (unlikely(config_valgrind && in_valgrind))
+	if (config_valgrind && unlikely(in_valgrind))
 		rzsize = p2rz(ptr);
 	isqalloc(ptr, usize, try_tcache);
 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
@@ -1254,7 +1254,7 @@ je_realloc(void *ptr, size_t size)
 	size_t old_usize = 0;
 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 
-	if (size == 0) {
+	if (unlikely(size == 0)) {
 		if (ptr != NULL) {
 			/* realloc(ptr, 0) is equivalent to free(ptr). */
 			UTRACE(ptr, 0, 0);
@@ -1264,21 +1264,22 @@ je_realloc(void *ptr, size_t size)
 		size = 1;
 	}
 
-	if (ptr != NULL) {
+	if (likely(ptr != NULL)) {
 		assert(malloc_initialized || IS_INITIALIZER);
 		malloc_thread_init();
 
 		if ((config_prof && opt_prof) || config_stats ||
-		    unlikely(config_valgrind && in_valgrind))
+		    (config_valgrind && unlikely(in_valgrind)))
 			old_usize = isalloc(ptr, config_prof);
-		if (unlikely(config_valgrind && in_valgrind))
+		if (config_valgrind && unlikely(in_valgrind))
 			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
 
 		if (config_prof && opt_prof) {
 			usize = s2u(size);
 			ret = irealloc_prof(ptr, old_usize, usize);
 		} else {
-			if (config_stats || unlikely(config_valgrind && in_valgrind))
+			if (config_stats || (config_valgrind &&
+			    unlikely(in_valgrind)))
 				usize = s2u(size);
 			ret = iralloc(ptr, size, 0, false);
 		}
@@ -1287,15 +1288,15 @@ je_realloc(void *ptr, size_t size)
 		ret = imalloc_body(size, &usize);
 	}
 
-	if (ret == NULL) {
-		if (config_xmalloc && opt_xmalloc) {
+	if (unlikely(ret == NULL)) {
+		if (config_xmalloc && unlikely(opt_xmalloc)) {
 			malloc_write("<jemalloc>: Error in realloc(): "
 			    "out of memory\n");
 			abort();
 		}
 		set_errno(ENOMEM);
 	}
-	if (config_stats && ret != NULL) {
+	if (config_stats && likely(ret != NULL)) {
 		thread_allocated_t *ta;
 		assert(usize == isalloc(ret, config_prof));
 		ta = thread_allocated_tsd_get();
@@ -1313,7 +1314,7 @@ je_free(void *ptr)
 {
 
 	UTRACE(ptr, 0, 0);
-	if (ptr != NULL)
+	if (likely(ptr != NULL))
 		ifree(ptr, true);
 }
 
@@ -1410,7 +1411,7 @@ imallocx_flags_decode(size_t size, int flags, size_t *usize, size_t *alignment,
     bool *zero, bool *try_tcache, arena_t **arena)
 {
 
-	if (flags == 0) {
+	if (likely(flags == 0)) {
 		*usize = s2u(size);
 		assert(usize != 0);
 		*alignment = 0;
@@ -1440,7 +1441,7 @@ imallocx_maybe_flags(size_t size, int flags, size_t usize, size_t alignment,
     bool zero, bool try_tcache, arena_t *arena)
 {
 
-	if (flags == 0)
+	if (likely(flags == 0))
 		return (imalloc(size));
 	return (imallocx_flags(usize, alignment, zero, try_tcache, arena));
 }
@@ -1479,7 +1480,7 @@ imallocx_prof(size_t size, int flags, size_t *usize)
 	imallocx_flags_decode(size, flags, usize, &alignment, &zero,
 	    &try_tcache, &arena);
 	tctx = prof_alloc_prep(*usize, true);
-	if ((uintptr_t)tctx == (uintptr_t)1U) {
+	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
 		p = imallocx_maybe_flags(size, flags, *usize, alignment, zero,
 		    try_tcache, arena);
 	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
@@ -1487,7 +1488,7 @@ imallocx_prof(size_t size, int flags, size_t *usize)
 		    try_tcache, arena);
 	} else
 		p = NULL;
-	if (p == NULL) {
+	if (unlikely(p == NULL)) {
 		prof_alloc_rollback(tctx, true);
 		return (NULL);
 	}
@@ -1504,8 +1505,8 @@ imallocx_no_prof(size_t size, int flags, size_t *usize)
 	bool try_tcache;
 	arena_t *arena;
 
-	if (flags == 0) {
-		if (config_stats || unlikely(config_valgrind && in_valgrind))
+	if (likely(flags == 0)) {
+		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 			*usize = s2u(size);
 		return (imalloc(size));
 	}
@@ -1530,7 +1531,7 @@ je_mallocx(size_t size, int flags)
 		p = imallocx_prof(size, flags, &usize);
 	else
 		p = imallocx_no_prof(size, flags, &usize);
-	if (p == NULL)
+	if (unlikely(p == NULL))
 		goto label_oom;
 
 	if (config_stats) {
@@ -1541,7 +1542,7 @@ je_mallocx(size_t size, int flags)
 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
 	return (p);
 label_oom:
-	if (config_xmalloc && opt_xmalloc) {
+	if (config_xmalloc && unlikely(opt_xmalloc)) {
 		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
 		abort();
 	}
@@ -1582,14 +1583,14 @@ irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
 
 	old_tctx = prof_tctx_get(oldptr);
 	tctx = prof_alloc_prep(*usize, true);
-	if ((uintptr_t)tctx != (uintptr_t)1U) {
+	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
 		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
 		    try_tcache_alloc, try_tcache_dalloc, arena, tctx);
 	} else {
 		p = iralloct(oldptr, size, alignment, zero, try_tcache_alloc,
 		    try_tcache_dalloc, arena);
 	}
-	if (p == NULL) {
+	if (unlikely(p == NULL)) {
 		prof_alloc_rollback(tctx, true);
 		return (NULL);
 	}
@@ -1614,7 +1615,8 @@ void *
 je_rallocx(void *ptr, size_t size, int flags)
 {
 	void *p;
-	size_t usize, old_usize;
+	size_t usize;
+	UNUSED size_t old_usize JEMALLOC_CC_SILENCE_INIT(0);
 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
 	size_t alignment = MALLOCX_ALIGN_GET(flags);
 	bool zero = flags & MALLOCX_ZERO;
@@ -1626,7 +1628,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 	assert(malloc_initialized || IS_INITIALIZER);
 	malloc_thread_init();
 
-	if ((flags & MALLOCX_ARENA_MASK) != 0) {
+	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
 		arena_chunk_t *chunk;
 		try_tcache_alloc = false;
@@ -1641,9 +1643,9 @@ je_rallocx(void *ptr, size_t size, int flags)
 	}
 
 	if ((config_prof && opt_prof) || config_stats ||
-	    (unlikely(config_valgrind && in_valgrind)))
+	    ((config_valgrind && unlikely(in_valgrind))))
 		old_usize = isalloc(ptr, config_prof);
-	if (unlikely(config_valgrind && in_valgrind))
+	if (config_valgrind && unlikely(in_valgrind))
 		old_rzsize = u2rz(old_usize);
 
 	if (config_prof && opt_prof) {
@@ -1651,14 +1653,14 @@ je_rallocx(void *ptr, size_t size, int flags)
 		assert(usize != 0);
 		p = irallocx_prof(ptr, old_usize, size, alignment, &usize,
 		    zero, try_tcache_alloc, try_tcache_dalloc, arena);
-		if (p == NULL)
+		if (unlikely(p == NULL))
 			goto label_oom;
 	} else {
 		p = iralloct(ptr, size, alignment, zero, try_tcache_alloc,
 		    try_tcache_dalloc, arena);
-		if (p == NULL)
+		if (unlikely(p == NULL))
 			goto label_oom;
-		if (config_stats || (config_valgrind && in_valgrind))
+		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
 			usize = isalloc(p, config_prof);
 	}
 
@@ -1673,7 +1675,7 @@ je_rallocx(void *ptr, size_t size, int flags)
 	    old_rzsize, false, zero);
 	return (p);
 label_oom:
-	if (config_xmalloc && opt_xmalloc) {
+	if (config_xmalloc && unlikely(opt_xmalloc)) {
 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
 		abort();
 	}
@@ -1738,14 +1740,14 @@ ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
 	max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
 	    alignment);
 	tctx = prof_alloc_prep(max_usize, false);
-	if ((uintptr_t)tctx != (uintptr_t)1U) {
+	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
 		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
 		    alignment, zero, max_usize, arena, tctx);
 	} else {
 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
 		    zero, arena);
 	}
-	if (usize == old_usize) {
+	if (unlikely(usize == old_usize)) {
 		prof_alloc_rollback(tctx, false);
 		return (usize);
 	}
@@ -1769,14 +1771,14 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 	assert(malloc_initialized || IS_INITIALIZER);
 	malloc_thread_init();
 
-	if ((flags & MALLOCX_ARENA_MASK) != 0) {
+	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
 		arena = arenas[arena_ind];
 	} else
 		arena = NULL;
 
 	old_usize = isalloc(ptr, config_prof);
-	if (unlikely(config_valgrind && in_valgrind))
+	if (config_valgrind && unlikely(in_valgrind))
 		old_rzsize = u2rz(old_usize);
 
 	if (config_prof && opt_prof) {
@@ -1786,7 +1788,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
 		    zero, arena);
 	}
-	if (usize == old_usize)
+	if (unlikely(usize == old_usize))
 		goto label_not_resized;
 
 	if (config_stats) {
@@ -1828,7 +1830,7 @@ je_dallocx(void *ptr, int flags)
 	assert(ptr != NULL);
 	assert(malloc_initialized || IS_INITIALIZER);
 
-	if ((flags & MALLOCX_ARENA_MASK) != 0) {
+	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		try_tcache = (chunk == ptr || chunk->arena !=
@@ -1845,7 +1847,7 @@ inallocx(size_t size, int flags)
 {
 	size_t usize;
 
-	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0)
+	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
 		usize = s2u(size);
 	else
 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
@@ -1864,7 +1866,7 @@ je_sdallocx(void *ptr, size_t size, int flags)
 	usize = inallocx(size, flags);
 	assert(usize == isalloc(ptr, config_prof));
 
-	if ((flags & MALLOCX_ARENA_MASK) != 0) {
+	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
 		try_tcache = (chunk == ptr || chunk->arena !=
diff --git a/src/quarantine.c b/src/quarantine.c
index 3b874422..efddeae7 100644
--- a/src/quarantine.c
+++ b/src/quarantine.c
@@ -141,12 +141,12 @@ quarantine(void *ptr)
 		obj->usize = usize;
 		quarantine->curbytes += usize;
 		quarantine->curobjs++;
-		if (config_fill && opt_junk) {
+		if (config_fill && unlikely(opt_junk)) {
 			/*
 			 * Only do redzone validation if Valgrind isn't in
 			 * operation.
 			 */
-			if ((config_valgrind == false || in_valgrind == false)
+			if ((!config_valgrind || likely(!in_valgrind))
 			    && usize <= SMALL_MAXCLASS)
 				arena_quarantine_junk_small(ptr, usize);
 			else
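For reference, the hunks above assume that likely() and unlikely() are already defined elsewhere in the tree; those definitions are not part of this excerpt. A minimal sketch of the conventional form, assuming a GCC/Clang-compatible compiler (illustrative only, not necessarily jemalloc's exact wording):

/*
 * Branch-prediction hints (sketch).  __builtin_expect() tells GCC/Clang which
 * way a condition usually evaluates, so the expected path is laid out as
 * straight-line (fall-through) code.
 */
#ifdef __GNUC__
#  define likely(x)	__builtin_expect(!!(x), 1)
#  define unlikely(x)	__builtin_expect(!!(x), 0)
#else
/* Without __builtin_expect(), the hints compile away to plain conditions. */
#  define likely(x)	(x)
#  define unlikely(x)	(x)
#endif

With such definitions in place, a guard like if (unlikely(ret == NULL)) keeps the out-of-memory handling off the hot path without changing program behavior.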