Convert to uniform style: cond == false --> !cond

Jason Evans 2014-10-03 10:16:09 -07:00
parent ebbd0c91f0
commit 551ebc4364
20 changed files with 111 additions and 115 deletions
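The rewrite rule applied in every hunk below is mechanical: a comparison of a boolean expression against false becomes a logical negation, and redundant mentions of true are dropped. A minimal before/after sketch of the rule (hypothetical names, not taken from this commit):

    /* Before: explicit comparisons against false. */
    if (initialized == false && init_failed())
            return (EAGAIN);
    assert(done == false);

    /* After: uniform logical negation. */
    if (!initialized && init_failed())
            return (EAGAIN);
    assert(!done);

The small net shrinkage (111 additions against 115 deletions) comes mostly from multi-line conditions that fit on a single line once "== false" is dropped.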

(file 1 of 20)

@@ -1111,13 +1111,12 @@ arena_salloc(const void *ptr, bool demote)
     pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
     assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
     binind = arena_mapbits_binind_get(chunk, pageind);
-    if (unlikely(binind == BININD_INVALID || (config_prof && demote == false
-        && arena_mapbits_large_get(chunk, pageind) != 0))) {
+    if (unlikely(binind == BININD_INVALID || (config_prof && !demote &&
+        arena_mapbits_large_get(chunk, pageind) != 0))) {
         /*
-         * Large allocation. In the common case (demote == true), and
-         * as this is an inline function, most callers will only end up
-         * looking at binind to determine that ptr is a small
-         * allocation.
+         * Large allocation. In the common case (demote), and as this
+         * is an inline function, most callers will only end up looking
+         * at binind to determine that ptr is a small allocation.
          */
         assert(((uintptr_t)ptr & PAGE_MASK) == 0);
         ret = arena_mapbits_large_size_get(chunk, pageind);

(file 2 of 20)

@@ -139,7 +139,7 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
     bitmap_t g;

     assert(bit < binfo->nbits);
-    assert(bitmap_get(bitmap, binfo, bit) == false);
+    assert(!bitmap_get(bitmap, binfo, bit));
     goff = bit >> LG_BITMAP_GROUP_NBITS;
     gp = &bitmap[goff];
     g = *gp;
@@ -172,7 +172,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
     bitmap_t g;
     unsigned i;

-    assert(bitmap_full(bitmap, binfo) == false);
+    assert(!bitmap_full(bitmap, binfo));

     i = binfo->nlevels - 1;
     g = bitmap[binfo->levels[i].group_offset];
@@ -204,7 +204,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
     assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0);
     g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
     *gp = g;
-    assert(bitmap_get(bitmap, binfo, bit) == false);
+    assert(!bitmap_get(bitmap, binfo, bit));
     /* Propagate group state transitions up the tree. */
     if (propagate) {
         unsigned i;
@@ -218,7 +218,7 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit)
             == 0);
         g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK);
         *gp = g;
-        if (propagate == false)
+        if (!propagate)
             break;
     }
 }

(file 3 of 20)

@@ -714,7 +714,7 @@ isalloc(const void *ptr, bool demote)
     assert(ptr != NULL);
     /* Demotion only makes sense if config_prof is true. */
-    assert(config_prof || demote == false);
+    assert(config_prof || !demote);

     chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
     if (chunk != ptr)

(file 4 of 20)

@@ -388,7 +388,7 @@ prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
         /* Compute new sample threshold. */
         if (update)
             prof_sample_threshold_update(tdata);
-        return (tdata->active == false);
+        return (!tdata->active);
     }
 }

(file 5 of 20)

@@ -593,7 +593,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         if (left != &rbtree->rbt_nil) { \
             /* node has no successor, but it has a left child. */ \
             /* Splice node out, without losing the left child. */ \
-            assert(rbtn_red_get(a_type, a_field, node) == false); \
+            assert(!rbtn_red_get(a_type, a_field, node)); \
             assert(rbtn_red_get(a_type, a_field, left)); \
             rbtn_black_set(a_type, a_field, left); \
             if (pathp == path) { \
@@ -629,8 +629,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
         if (pathp->cmp < 0) { \
             rbtn_left_set(a_type, a_field, pathp->node, \
                 pathp[1].node); \
-            assert(rbtn_red_get(a_type, a_field, pathp[1].node) \
-                == false); \
+            assert(!rbtn_red_get(a_type, a_field, pathp[1].node)); \
             if (rbtn_red_get(a_type, a_field, pathp->node)) { \
                 a_type *right = rbtn_right_get(a_type, a_field, \
                     pathp->node); \
@@ -862,7 +861,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
     } \
     /* Set root. */ \
     rbtree->rbt_root = path->node; \
-    assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \
+    assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \
 } \
 a_attr a_type * \
 a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \

(file 6 of 20)

@@ -191,9 +191,9 @@ tcache_get(tsd_t *tsd, bool create)
 {
     tcache_t *tcache;

-    if (config_tcache == false)
+    if (!config_tcache)
         return (NULL);
-    if (config_lazy_lock && isthreaded == false)
+    if (config_lazy_lock && !isthreaded)
         return (NULL);
     /*
      * If create is true, the caller has already assured that tsd is
@@ -261,7 +261,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
     }
     assert(tcache_salloc(ret) == size);

-    if (likely(zero == false)) {
+    if (likely(!zero)) {
         if (config_fill) {
             if (unlikely(opt_junk)) {
                 arena_alloc_junk_small(ret,
@@ -315,7 +315,7 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
             arena_mapbits_large_binind_set(chunk, pageind,
                 BININD_INVALID);
         }
-        if (likely(zero == false)) {
+        if (likely(!zero)) {
             if (config_fill) {
                 if (unlikely(opt_junk))
                     memset(ret, 0xa5, size);

(file 7 of 20)

@@ -178,7 +178,7 @@ arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
     void *rpages;

     assert(run->nfree > 0);
-    assert(bitmap_full(run->bitmap, &bin_info->bitmap_info) == false);
+    assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

     regind = bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
     miscelm = arena_run_to_miscelm(run);
@@ -524,7 +524,7 @@ arena_chunk_init_hard(arena_t *arena)
      * There is no need to initialize the internal page map entries unless
      * the chunk is not zeroed.
      */
-    if (zero == false) {
+    if (!zero) {
         JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
             (void *)arena_bitselm_get(chunk, map_bias+1),
             (size_t)((uintptr_t) arena_bitselm_get(chunk,
@@ -782,7 +782,7 @@ arena_compute_npurge(arena_t *arena, bool all)
      * Compute the minimum number of pages that this thread should try to
      * purge.
      */
-    if (all == false) {
+    if (!all) {
         size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

         npurge = arena->ndirty - threshold;
@@ -829,7 +829,7 @@ arena_stash_dirty(arena_t *arena, bool all, size_t npurge,
         nstashed += npages;
-        if (all == false && nstashed >= npurge)
+        if (!all && nstashed >= npurge)
             break;
     }
@@ -1049,7 +1049,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
      */
     assert(arena_mapbits_dirty_get(chunk, run_ind) ==
         arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
-    if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
+    if (!cleaned && arena_mapbits_dirty_get(chunk, run_ind) != 0)
         dirty = true;
     flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
@@ -1481,10 +1481,10 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
         bin->stats.nrequests++;
     }
     malloc_mutex_unlock(&bin->lock);
-    if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+    if (config_prof && !isthreaded && arena_prof_accum(arena, size))
         prof_idump();

-    if (zero == false) {
+    if (!zero) {
         if (config_fill) {
             if (unlikely(opt_junk)) {
                 arena_alloc_junk_small(ret,
@@ -1537,7 +1537,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
     if (config_prof && idump)
         prof_idump();

-    if (zero == false) {
+    if (!zero) {
         if (config_fill) {
             if (unlikely(opt_junk))
                 memset(ret, 0xa5, size);
@@ -1608,7 +1608,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
     }
     malloc_mutex_unlock(&arena->lock);

-    if (config_fill && zero == false) {
+    if (config_fill && !zero) {
         if (unlikely(opt_junk))
             memset(ret, 0xa5, size);
         else if (unlikely(opt_zero))
@@ -2008,7 +2008,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
             bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
                 oldsize, PAGE_CEILING(size),
                 psize - PAGE_CEILING(size), zero);
-            if (config_fill && ret == false && zero == false) {
+            if (config_fill && !ret && !zero) {
                 if (unlikely(opt_junk)) {
                     memset((void *)((uintptr_t)ptr +
                         oldsize), 0xa5, isalloc(ptr,
@@ -2044,8 +2044,8 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
         } else {
             assert(size <= arena_maxclass);
             if (size + extra > SMALL_MAXCLASS) {
-                if (arena_ralloc_large(ptr, oldsize, size,
-                    extra, zero) == false)
+                if (!arena_ralloc_large(ptr, oldsize, size,
+                    extra, zero))
                     return (false);
             }
         }
@@ -2064,7 +2064,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     size_t copysize;

     /* Try to avoid moving the allocation. */
-    if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
+    if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
         return (ptr);

     /*
@@ -2130,7 +2130,7 @@ bool
 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
 {

-    if (have_dss == false)
+    if (!have_dss)
         return (dss_prec != dss_prec_disabled);
     malloc_mutex_lock(&arena->lock);
     arena->dss_prec = dss_prec;

(file 8 of 20)

@@ -121,7 +121,7 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
     if (node != NULL)
         base_node_dalloc(node);
     if (*zero) {
-        if (zeroed == false)
+        if (!zeroed)
             memset(ret, 0, size);
         else if (config_debug) {
             size_t i;
@@ -136,10 +136,10 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 }

 /*
- * If the caller specifies (*zero == false), it is still possible to receive
- * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc()
- * takes advantage of this to avoid demanding zeroed chunks, but taking
- * advantage of them if they are returned.
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
+ * advantage of this to avoid demanding zeroed chunks, but taking advantage of
+ * them if they are returned.
  */
 static void *
 chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
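The comment above describes an out-parameter contract: a caller may pass *zero == false (zeroing not required) and still receive zeroed memory, in which case the callee flips *zero to true so the caller can skip redundant initialization. A hypothetical caller sketch of that protocol (the chunk_alloc() and initialize_map() names are invented for illustration, not part of this commit; the shape matches the if (!zero) block in the arena.c hunk earlier):

    bool zero = false;            /* Zeroing is not demanded up front. */
    void *chunk = chunk_alloc(chunksize, chunksize, &zero);

    if (chunk != NULL && !zero) {
            /* Chunk did not come back zeroed; initialize it manually. */
            initialize_map(chunk);
    }
    /* When *zero was toggled to true, the manual pass is skipped. */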
@@ -186,7 +186,7 @@ chunk_register(void *chunk, size_t size, bool base)
     assert(chunk != NULL);
     assert(CHUNK_ADDR2BASE(chunk) == chunk);

-    if (config_ivsalloc && base == false) {
+    if (config_ivsalloc && !base) {
         if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
             return (true);
     }
@@ -288,7 +288,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
         extent_tree_szad_remove(chunks_szad, node);
         node->addr = chunk;
         node->size += size;
-        node->zeroed = (node->zeroed && (unzeroed == false));
+        node->zeroed = (node->zeroed && !unzeroed);
         extent_tree_szad_insert(chunks_szad, node);
     } else {
         /* Coalescing forward failed, so insert a new node. */
@@ -305,7 +305,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
         xnode = NULL; /* Prevent deallocation below. */
         node->addr = chunk;
         node->size = size;
-        node->zeroed = (unzeroed == false);
+        node->zeroed = !unzeroed;
         extent_tree_ad_insert(chunks_ad, node);
         extent_tree_szad_insert(chunks_szad, node);
     }

(file 9 of 20)

@@ -45,7 +45,7 @@ chunk_dss_prec_get(void)
 {
     dss_prec_t ret;

-    if (have_dss == false)
+    if (!have_dss)
         return (dss_prec_disabled);
     malloc_mutex_lock(&dss_mtx);
     ret = dss_prec_default;
@@ -57,7 +57,7 @@ bool
 chunk_dss_prec_set(dss_prec_t dss_prec)
 {

-    if (have_dss == false)
+    if (!have_dss)
         return (dss_prec != dss_prec_disabled);
     malloc_mutex_lock(&dss_mtx);
     dss_prec_default = dss_prec;

(file 10 of 20)

@@ -132,7 +132,7 @@ pages_purge(void *addr, size_t length)
 #  error "No madvise(2) flag defined for purging unused dirty pages."
 # endif
     int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
-    unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+    unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
 # undef JEMALLOC_MADV_PURGE
 # undef JEMALLOC_MADV_ZEROS
 #else
@@ -209,5 +209,5 @@ chunk_dalloc_mmap(void *chunk, size_t size)
     if (config_munmap)
         pages_unmap(chunk, size);

-    return (config_munmap == false);
+    return (!config_munmap);
 }

(file 11 of 20)

@@ -185,7 +185,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
         }

         bucket = tbucket;
-        if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+        if (!ckh_try_bucket_insert(ckh, bucket, key, data))
             return (false);
     }
 }
@@ -201,12 +201,12 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
     /* Try to insert in primary bucket. */
     bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data))
         return (false);

     /* Try to insert in secondary bucket. */
     bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
-    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
+    if (!ckh_try_bucket_insert(ckh, bucket, key, data))
         return (false);

     /*
@@ -281,7 +281,7 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
         tab = ttab;
         ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

-        if (ckh_rebuild(ckh, tab) == false) {
+        if (!ckh_rebuild(ckh, tab)) {
             idalloc(tsd, tab);
             break;
         }
@@ -327,7 +327,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
         tab = ttab;
         ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

-        if (ckh_rebuild(ckh, tab) == false) {
+        if (!ckh_rebuild(ckh, tab)) {
             idalloc(tsd, tab);
 #ifdef CKH_COUNT
             ckh->nshrinks++;

(file 12 of 20)

@@ -36,8 +36,7 @@ static inline const ctl_indexed_node_t *
 ctl_indexed_node(const ctl_node_t *node)
 {

-    return ((node->named == false) ? (const ctl_indexed_node_t *)node :
-        NULL);
+    return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
 }

 /******************************************************************************/
@@ -693,7 +692,7 @@ ctl_init(void)
     bool ret;

     malloc_mutex_lock(&ctl_mtx);
-    if (ctl_initialized == false) {
+    if (!ctl_initialized) {
         /*
          * Allocate space for one extra arena stats element, which
          * contains summed stats across all arenas.
@@ -843,7 +842,7 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
     size_t mib[CTL_MAX_DEPTH];
     const ctl_named_node_t *node;

-    if (ctl_initialized == false && ctl_init()) {
+    if (!ctl_initialized && ctl_init()) {
         ret = EAGAIN;
         goto label_return;
     }
@@ -870,7 +869,7 @@ ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
 {
     int ret;

-    if (ctl_initialized == false && ctl_init()) {
+    if (!ctl_initialized && ctl_init()) {
         ret = EAGAIN;
         goto label_return;
     }
@@ -888,7 +887,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     const ctl_named_node_t *node;
     size_t i;

-    if (ctl_initialized == false && ctl_init()) {
+    if (!ctl_initialized && ctl_init()) {
         ret = EAGAIN;
         goto label_return;
     }
@@ -1015,7 +1014,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
     int ret; \
     t oldval; \
     \
-    if ((c) == false) \
+    if (!(c)) \
         return (ENOENT); \
     if (l) \
         malloc_mutex_lock(&ctl_mtx); \
@@ -1038,7 +1037,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
     int ret; \
     t oldval; \
     \
-    if ((c) == false) \
+    if (!(c)) \
         return (ENOENT); \
     malloc_mutex_lock(&ctl_mtx); \
     READONLY(); \
@@ -1082,7 +1081,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
     int ret; \
     t oldval; \
     \
-    if ((c) == false) \
+    if (!(c)) \
         return (ENOENT); \
     READONLY(); \
     oldval = (v); \
@@ -1119,7 +1118,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
     t oldval; \
     tsd_t *tsd; \
     \
-    if ((c) == false) \
+    if (!(c)) \
         return (ENOENT); \
     READONLY(); \
     tsd = tsd_tryget(); \
@@ -1291,7 +1290,7 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
     int ret;
     bool oldval;

-    if (config_tcache == false)
+    if (!config_tcache)
         return (ENOENT);

     oldval = tcache_enabled_get();
@@ -1315,7 +1314,7 @@ thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
 {
     int ret;

-    if (config_tcache == false)
+    if (!config_tcache)
         return (ENOENT);

     READONLY();
@@ -1335,7 +1334,7 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
     int ret;
     const char *oldname;

-    if (config_prof == false)
+    if (!config_prof)
         return (ENOENT);

     oldname = prof_thread_name_get();
@@ -1372,7 +1371,7 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
     int ret;
     bool oldval;

-    if (config_prof == false)
+    if (!config_prof)
         return (ENOENT);

     oldval = prof_thread_active_get();
@@ -1459,7 +1458,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
             }
         }

-        if (match == false) {
+        if (!match) {
             ret = EINVAL;
             goto label_return;
         }
@@ -1668,7 +1667,7 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     int ret;
     bool oldval;

-    if (config_prof == false)
+    if (!config_prof)
         return (ENOENT);

     malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
@@ -1697,7 +1696,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     int ret;
     const char *filename = NULL;

-    if (config_prof == false)
+    if (!config_prof)
         return (ENOENT);

     WRITEONLY();
@@ -1721,7 +1720,7 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
     size_t lg_sample = lg_prof_sample;
     tsd_t *tsd;

-    if (config_prof == false)
+    if (!config_prof)
         return (ENOENT);

     WRITEONLY();
@@ -1847,7 +1846,7 @@ stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
     const ctl_named_node_t * ret;

     malloc_mutex_lock(&ctl_mtx);
-    if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
+    if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
         ret = NULL;
         goto label_return;
     }

(file 13 of 20)

@@ -62,10 +62,10 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t size, size_t alignment,
     extent_tree_ad_insert(&huge, node);
     malloc_mutex_unlock(&huge_mtx);

-    if (config_fill && zero == false) {
+    if (config_fill && !zero) {
         if (unlikely(opt_junk))
             memset(ret, 0xa5, csize);
-        else if (unlikely(opt_zero) && is_zeroed == false)
+        else if (unlikely(opt_zero) && !is_zeroed)
             memset(ret, 0, csize);
     }
@@ -85,7 +85,7 @@ huge_dalloc_junk(void *ptr, size_t usize)
         * Only bother junk filling if the chunk isn't about to be
         * unmapped.
         */
-        if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
+        if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
             memset(ptr, 0x5a, usize);
     }
 }
@@ -156,7 +156,7 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
     size_t copysize;

     /* Try to avoid moving the allocation. */
-    if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
+    if (!huge_ralloc_no_move(ptr, oldsize, size, extra))
         return (ptr);

     /*

(file 14 of 20)

@@ -119,7 +119,7 @@ arenas_extend(unsigned ind)
     arena_t *ret;

     ret = (arena_t *)base_alloc(sizeof(arena_t));
-    if (ret != NULL && arena_new(ret, ind) == false) {
+    if (ret != NULL && !arena_new(ret, ind)) {
         arenas[ind] = ret;
         return (ret);
     }
@@ -326,7 +326,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
     *k_p = opts;

-    for (accept = false; accept == false;) {
+    for (accept = false; !accept;) {
         switch (*opts) {
         case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
         case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
@@ -361,7 +361,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
         }
     }

-    for (accept = false; accept == false;) {
+    for (accept = false; !accept;) {
         switch (*opts) {
         case ',':
             opts++;
@@ -418,7 +418,7 @@ malloc_conf_init(void)
     in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
     if (config_fill && unlikely(in_valgrind)) {
         opt_junk = false;
-        assert(opt_zero == false);
+        assert(!opt_zero);
         opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
         opt_redzone = true;
     }
@@ -496,8 +496,8 @@ malloc_conf_init(void)
             opts = buf;
         }

-        while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
-            &vlen) == false) {
+        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+            &vlen)) {
 #define CONF_MATCH(n) \
     (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
 #define CONF_HANDLE_BOOL(o, n, cont) \
@@ -607,7 +607,7 @@ malloc_conf_init(void)
                 }
             }

-            if (match == false) {
+            if (!match) {
                 malloc_conf_error("Invalid conf value",
                     k, klen, v, vlen);
             }
@@ -697,13 +697,13 @@ malloc_init_hard(void)
         return (false);
     }
 #ifdef JEMALLOC_THREADED_INIT
-    if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
+    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
         /* Busy-wait until the initializing thread completes. */
         do {
             malloc_mutex_unlock(&init_lock);
             CPU_SPINWAIT;
             malloc_mutex_lock(&init_lock);
-        } while (malloc_initialized == false);
+        } while (!malloc_initialized);
         malloc_mutex_unlock(&init_lock);
         return (false);
     }
@@ -2011,7 +2011,7 @@ _malloc_prefork(void)
     unsigned i;

 #ifdef JEMALLOC_MUTEX_INIT_CB
-    if (malloc_initialized == false)
+    if (!malloc_initialized)
         return;
 #endif
     assert(malloc_initialized);
@@ -2040,7 +2040,7 @@ _malloc_postfork(void)
     unsigned i;

 #ifdef JEMALLOC_MUTEX_INIT_CB
-    if (malloc_initialized == false)
+    if (!malloc_initialized)
         return;
 #endif
     assert(malloc_initialized);

(file 15 of 20)

@@ -232,7 +232,7 @@ prof_enter(prof_tdata_t *tdata)
     cassert(config_prof);

-    assert(tdata->enq == false);
+    assert(!tdata->enq);
     tdata->enq = true;

     malloc_mutex_lock(&bt2gctx_mtx);
@@ -578,7 +578,7 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
     if (opt_prof_accum)
         return (false);
-    if (tctx_tree_empty(&gctx->tctxs) == false)
+    if (!tctx_tree_empty(&gctx->tctxs))
         return (false);
     if (gctx->nlimbo != 0)
         return (false);
@@ -595,7 +595,7 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
     assert(tctx->cnts.curobjs == 0);
     assert(tctx->cnts.curbytes == 0);
-    assert(opt_prof_accum == false);
+    assert(!opt_prof_accum);
     assert(tctx->cnts.accumobjs == 0);
     assert(tctx->cnts.accumbytes == 0);
@@ -858,7 +858,7 @@ prof_dump_open(bool propagate_err, const char *filename)
     int fd;

     fd = creat(filename, 0644);
-    if (fd == -1 && propagate_err == false) {
+    if (fd == -1 && !propagate_err) {
         malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
             filename);
         if (opt_abort)
@@ -883,7 +883,7 @@ prof_dump_flush(bool propagate_err)
     err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
     if (err == -1) {
-        if (propagate_err == false) {
+        if (!propagate_err) {
             malloc_write("<jemalloc>: write() failed during heap "
                 "profile flush\n");
             if (opt_abort)
@@ -1145,8 +1145,8 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
     tdata->dumping = true;
     memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
-    for (tabind = 0; ckh_iter(&tdata->bt2tctx, &tabind, NULL,
-        &tctx.v) == false;)
+    for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+        &tctx.v);)
         prof_tctx_merge_tdata(tctx.p, tdata);

     cnt_all->curobjs += tdata->cnt_summed.curobjs;
@@ -1167,7 +1167,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
 {
     bool propagate_err = *(bool *)arg;

-    if (tdata->dumping == false)
+    if (!tdata->dumping)
         return (NULL);

     if (prof_dump_printf(propagate_err,
@@ -1220,7 +1220,7 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
     cassert(config_prof);

     /* Avoid dumping such gctx's that have no useful data. */
-    if ((opt_prof_accum == false && gctx->cnt_summed.curobjs == 0) ||
+    if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
         (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
         assert(gctx->cnt_summed.curobjs == 0);
         assert(gctx->cnt_summed.curbytes == 0);
@@ -1374,7 +1374,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
      * summing.
      */
     gctx_tree_new(&gctxs);
-    for (tabind = 0; ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v) == false;)
+    for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
         prof_dump_gctx_prep(gctx.p, &gctxs);

     /*
@@ -1457,7 +1457,7 @@ prof_fdump(void)
     cassert(config_prof);

-    if (prof_booted == false)
+    if (!prof_booted)
         return;
     if ((tsd = tsd_tryget()) == NULL)
         return;
@@ -1479,7 +1479,7 @@ prof_idump(void)
     cassert(config_prof);

-    if (prof_booted == false)
+    if (!prof_booted)
         return;
     if ((tsd = tsd_tryget()) == NULL)
         return;
@@ -1508,7 +1508,7 @@ prof_mdump(const char *filename)
     cassert(config_prof);

-    if (opt_prof == false || prof_booted == false)
+    if (!opt_prof || !prof_booted)
         return (true);
     if ((tsd = tsd_tryget()) == NULL)
         return (true);
@@ -1535,7 +1535,7 @@ prof_gdump(void)
     cassert(config_prof);

-    if (prof_booted == false)
+    if (!prof_booted)
         return;
     if ((tsd = tsd_tryget()) == NULL)
         return;
@@ -1855,7 +1855,7 @@ prof_boot1(void)
      * initialized, so this function must be executed early.
      */
-    if (opt_prof_leak && opt_prof == false) {
+    if (opt_prof_leak && !opt_prof) {
         /*
          * Enable opt_prof, but in such a way that profiles are never
          * automatically dumped.

(file 16 of 20)

@@ -505,7 +505,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
                 ninitialized++;
         }

-        if (ninitialized > 1 || unmerged == false) {
+        if (ninitialized > 1 || !unmerged) {
             /* Print merged arena stats. */
             malloc_cprintf(write_cb, cbopaque,
                 "\nMerged arenas stats:\n");

(file 17 of 20)

@@ -101,7 +101,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
         malloc_mutex_lock(&bin->lock);
         if (config_stats && arena == tcache->arena) {
-            assert(merged_stats == false);
+            assert(!merged_stats);
             merged_stats = true;
             bin->stats.nflushes++;
             bin->stats.nrequests += tbin->tstats.nrequests;
@@ -132,7 +132,7 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
         }
         malloc_mutex_unlock(&bin->lock);
     }
-    if (config_stats && merged_stats == false) {
+    if (config_stats && !merged_stats) {
         /*
          * The flush loop didn't happen to flush to this thread's
          * arena, so the stats didn't get merged. Manually do so now.
@@ -210,7 +210,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
         if (config_prof && idump)
             prof_idump();
     }
-    if (config_stats && merged_stats == false) {
+    if (config_stats && !merged_stats) {
         /*
          * The flush loop didn't happen to flush to this thread's
          * arena, so the stats didn't get merged. Manually do so now.
@@ -262,7 +262,7 @@ tcache_t *
 tcache_get_hard(tsd_t *tsd)
 {

-    if (tcache_enabled_get() == false) {
+    if (!tcache_enabled_get()) {
         tcache_enabled_set(false); /* Memoize. */
         return (NULL);
     }

(file 18 of 20)

@@ -266,7 +266,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p)
         sign = '-';
     switch (sign) {
     case '-':
-        if (neg == false)
+        if (!neg)
             break;
         /* Fall through. */
     case ' ':
@@ -329,7 +329,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
     /* Left padding. */ \
     size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
         (size_t)width - slen : 0); \
-    if (left_justify == false && pad_len != 0) { \
+    if (!left_justify && pad_len != 0) { \
         size_t j; \
         for (j = 0; j < pad_len; j++) \
             APPEND_C(' '); \
@@ -406,19 +406,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
         while (true) {
             switch (*f) {
             case '#':
-                assert(alt_form == false);
+                assert(!alt_form);
                 alt_form = true;
                 break;
             case '-':
-                assert(left_justify == false);
+                assert(!left_justify);
                 left_justify = true;
                 break;
             case ' ':
-                assert(plus_space == false);
+                assert(!plus_space);
                 plus_space = true;
                 break;
             case '+':
-                assert(plus_plus == false);
+                assert(!plus_plus);
                 plus_plus = true;
                 break;
             default: goto label_width;

(file 19 of 20)

@@ -162,8 +162,7 @@ TEST_BEGIN(test_insert_iter_remove)
             memset(seen, 0, sizeof(seen));

-            for (tabind = 0; ckh_iter(&ckh, &tabind, &q, &r) ==
-                false;) {
+            for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) {
                 size_t k;

                 assert_ptr_eq(q, r, "Key and val not equal");

(file 20 of 20)

@@ -5,7 +5,7 @@
     for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \
         rbp_bh_t != &(a_rbt)->rbt_nil; \
         rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \
-        if (rbtn_red_get(a_type, a_field, rbp_bh_t) == false) { \
+        if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \
             (r_height)++; \
         } \
     } \
@@ -75,7 +75,7 @@ tree_recurse(node_t *node, unsigned black_height, unsigned black_depth,
     node_t *left_node = rbtn_left_get(node_t, link, node);
     node_t *right_node = rbtn_right_get(node_t, link, node);

-    if (rbtn_red_get(node_t, link, node) == false)
+    if (!rbtn_red_get(node_t, link, node))
         black_depth++;

     /* Red nodes must be interleaved with black nodes. */