bit_util: Change ffs functions indexing.

Making these 0-based instead of 1-based makes calling code simpler and will be
more consistent with functions introduced in subsequent diffs.
commit 1ed0288d9c (parent 786a27b9e5)
David Goldblatt, 2020-07-22 08:07:12 -07:00 (committed by David Goldblatt)
5 changed files with 86 additions and 24 deletions
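For orientation, a minimal sketch of the two conventions, using GCC/Clang's __builtin_ffsl as a stand-in for the configure-selected JEMALLOC_INTERNAL_FFSL macro (the standalone wrapper below is illustrative, not the library's code):

    #include <assert.h>

    /*
     * POSIX/builtin convention: 1-based bit position, with 0 as the
     * "no bits set" sentinel.  This diff's convention: 0-based bit
     * position, and a zero input is simply disallowed.
     */
    static unsigned ffs_lu(unsigned long x) {
    	assert(x != 0);  /* stands in for util_assume(x != 0) */
    	return (unsigned)__builtin_ffsl((long)x) - 1;
    }

    int main(void) {
    	assert(ffs_lu(0x01) == 0);  /* lowest set bit is bit 0 */
    	assert(ffs_lu(0x08) == 3);  /* 0b1000 -> bit 3 */
    	assert(ffs_lu(0x30) == 4);  /* 0b110000 -> bit 4 */
    	return 0;
    }

Dropping the sentinel is what lets every `ffs_*(...) - 1` at the call sites below collapse to a plain call.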

include/jemalloc/internal/bit_util.h

@@ -11,20 +11,29 @@
 # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
 #endif
 
+/*
+ * Unlike the builtins and posix ffs functions, our ffs requires a non-zero
+ * input, and returns the position of the lowest bit set (as opposed to the
+ * posix versions, which return 1 larger than that position and use a return
+ * value of zero as a sentinel).  This tends to simplify logic in callers, and
+ * allows for consistency with the builtins we build fls on top of.
+ */
 BIT_UTIL_INLINE unsigned
-ffs_llu(unsigned long long bitmap) {
-	return JEMALLOC_INTERNAL_FFSLL(bitmap);
+ffs_llu(unsigned long long x) {
+	util_assume(x != 0);
+	return JEMALLOC_INTERNAL_FFSLL(x) - 1;
 }
 
 BIT_UTIL_INLINE unsigned
-ffs_lu(unsigned long bitmap) {
-	return JEMALLOC_INTERNAL_FFSL(bitmap);
+ffs_lu(unsigned long x) {
+	util_assume(x != 0);
+	return JEMALLOC_INTERNAL_FFSL(x) - 1;
 }
 
 BIT_UTIL_INLINE unsigned
-ffs_u(unsigned bitmap) {
-	return JEMALLOC_INTERNAL_FFS(bitmap);
+ffs_u(unsigned x) {
+	util_assume(x != 0);
+	return JEMALLOC_INTERNAL_FFS(x) - 1;
 }
 
 #ifdef JEMALLOC_INTERNAL_POPCOUNTL
@@ -41,7 +50,8 @@ popcount_lu(unsigned long bitmap) {
 
 BIT_UTIL_INLINE size_t
 cfs_lu(unsigned long* bitmap) {
-	size_t bit = ffs_lu(*bitmap) - 1;
+	util_assume(*bitmap != 0);
+	size_t bit = ffs_lu(*bitmap);
 	*bitmap ^= ZU(1) << bit;
 	return bit;
 }
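cfs_lu ("clear first set") returns the index of the lowest set bit and clears it, so it inherits the same simplification. A hedged standalone sketch of how a caller typically drains a bitmap with it (plain assert and __builtin_ffsl stand in for util_assume and the configure-selected ffs; ZU() is jemalloc's size_t literal macro, written as 1UL here):

    #include <assert.h>
    #include <stdio.h>

    static size_t cfs_lu(unsigned long *bitmap) {
    	assert(*bitmap != 0);
    	size_t bit = (size_t)__builtin_ffsl((long)*bitmap) - 1;  /* 0-based */
    	*bitmap ^= 1UL << bit;  /* clear the bit we just reported */
    	return bit;
    }

    int main(void) {
    	unsigned long bits = 0x29;  /* bits 0, 3, and 5 set */
    	while (bits != 0) {
    		printf("%zu ", cfs_lu(&bits));  /* prints: 0 3 5 */
    	}
    	return 0;
    }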
@@ -209,7 +219,7 @@ lg_floor(size_t x) {
 		return (8 << LG_SIZEOF_PTR) - 1;
 	}
 	x++;
-	return ffs_zu(x) - 2;
+	return ffs_zu(x) - 1;
 }
 #endif
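The lg_floor hunk works because the lines above it (not shown) smear the highest set bit downward, turning x into 2^(lg_floor(x)+1) - 1; the x++ then yields an exact power of two, whose 0-based ffs is one more than the answer. A worked standalone sketch of that fallback path (the all-bits-set overflow case, handled by the early return visible in the hunk, is omitted here):

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    static size_t lg_floor_fallback(size_t x) {
    	assert(x != 0);
    	/* Smear: x becomes 2^(lg_floor(x)+1) - 1, e.g. 10 -> 15. */
    	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
    	x |= x >> 8;  x |= x >> 16;
    #if SIZE_MAX > 0xffffffffU
    	x |= x >> 32;
    #endif
    	x++;  /* now exactly 2^(lg_floor(x)+1), e.g. 15 -> 16 */
    	/* 1-based builtin: -1 makes it 0-based, -1 more undoes the +1. */
    	return (size_t)__builtin_ffsll((long long)x) - 2;
    }

    int main(void) {
    	assert(lg_floor_fallback(1) == 0);
    	assert(lg_floor_fallback(10) == 3);    /* 2^3 <= 10 < 2^4 */
    	assert(lg_floor_fallback(4096) == 12);
    	return 0;
    }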

include/jemalloc/internal/bitmap.h

@@ -272,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
 			}
 			return bitmap_ffu(bitmap, binfo, sib_base);
 		}
-		bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
+		bit += ((size_t)ffs_lu(group_masked)) <<
 		    (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
 	}
 	assert(bit >= min_bit);
@@ -284,9 +284,9 @@
 	    - 1);
 	size_t bit;
 	do {
-		bit = ffs_lu(g);
-		if (bit != 0) {
-			return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+		if (g != 0) {
+			bit = ffs_lu(g);
+			return (i << LG_BITMAP_GROUP_NBITS) + bit;
 		}
 		i++;
 		g = bitmap[i];
@@ -307,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 #ifdef BITMAP_USE_TREE
 	i = binfo->nlevels - 1;
 	g = bitmap[binfo->levels[i].group_offset];
-	bit = ffs_lu(g) - 1;
+	bit = ffs_lu(g);
 	while (i > 0) {
 		i--;
 		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+		bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
 	}
 #else
 	i = 0;
 	g = bitmap[0];
-	while ((bit = ffs_lu(g)) == 0) {
+	while (g == 0) {
 		i++;
 		g = bitmap[i];
 	}
-	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+	bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
 #endif
 	bitmap_set(bitmap, binfo, bit);
 	return bit;
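Note the loop restructurings above: with the zero-return sentinel gone, each group must be tested against zero before ffs_lu is called, rather than by inspecting the return value. A minimal standalone sketch of the resulting scan pattern (the group array, sizes, and helper name here are hypothetical):

    #include <assert.h>
    #include <stddef.h>

    #define LG_GROUP_NBITS 6  /* 64-bit groups, as on LP64 */

    /* Index of the first set bit across an array of groups. */
    static size_t first_set(const unsigned long *groups, size_t ngroups) {
    	size_t i = 0;
    	unsigned long g = groups[0];
    	while (g == 0) {  /* test for zero before calling ffs */
    		i++;
    		assert(i < ngroups);
    		g = groups[i];
    	}
    	/* 0-based ffs via the 1-based builtin. */
    	size_t bit = (size_t)__builtin_ffsl((long)g) - 1;
    	return (i << LG_GROUP_NBITS) + bit;
    }

    int main(void) {
    	unsigned long groups[3] = {0, 0, 0x10};  /* bit 4 of group 2 */
    	assert(first_set(groups, 3) == 2 * 64 + 4);
    	return 0;
    }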

include/jemalloc/internal/prng.h

@@ -136,7 +136,7 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
 	assert(range > 1);
 
 	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
+	lg_range = ffs_u32(pow2_ceil_u32(range));
 
 	/* Generate a result in [0..range) via repeated trial. */
 	do {
@@ -154,7 +154,7 @@ prng_range_u64(uint64_t *state, uint64_t range) {
 	assert(range > 1);
 
 	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+	lg_range = ffs_u64(pow2_ceil_u64(range));
 
 	/* Generate a result in [0..range) via repeated trial. */
 	do {
@@ -172,7 +172,7 @@ prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
 	assert(range > 1);
 
 	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+	lg_range = ffs_u64(pow2_ceil_u64(range));
 
 	/* Generate a result in [0..range) via repeated trial. */
 	do {
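All three prng hunks rely on the same identity: pow2_ceil rounds range up to the next power of two, and the 0-based ffs of a power of two is exactly its exponent, i.e. ceil(lg(range)). A worked check under that assumption (pow2_ceil_u32 reimplemented here for self-containment, with __builtin_ffs standing in for ffs_u32):

    #include <assert.h>
    #include <stdint.h>

    /* Smallest power of two >= x (standard bit-smearing formulation). */
    static uint32_t pow2_ceil_u32(uint32_t x) {
    	x--;
    	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
    	x |= x >> 8;  x |= x >> 16;
    	return x + 1;
    }

    int main(void) {
    	/* range = 10: pow2_ceil = 16 = 2^4, so ceil(lg(10)) = 4. */
    	unsigned lg_range = (unsigned)__builtin_ffs((int)pow2_ceil_u32(10)) - 1;
    	assert(lg_range == 4);

    	/* Exact power of two: ceil(lg(8)) = 3. */
    	assert((unsigned)__builtin_ffs((int)pow2_ceil_u32(8)) - 1 == 3);
    	return 0;
    }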