bit_util: Change ffs functions indexing.

Making these 0-based instead of 1-based makes calling code simpler and will be
more consistent with functions introduced in subsequent diffs.
Author: David Goldblatt, 2020-07-22 08:07:12 -07:00 (committed by David Goldblatt)
parent 786a27b9e5
commit 1ed0288d9c
5 changed files with 86 additions and 24 deletions

include/jemalloc/internal/bit_util.h

@@ -11,20 +11,29 @@
 #  error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
 #endif
 
+/*
+ * Unlike the builtins and posix ffs functions, our ffs requires a non-zero
+ * input, and returns the position of the lowest bit set (as opposed to the
+ * posix versions, which return 1 larger than that position and use a return
+ * value of zero as a sentinel).  This tends to simplify logic in callers, and
+ * allows for consistency with the builtins we build fls on top of.
+ */
 BIT_UTIL_INLINE unsigned
-ffs_llu(unsigned long long bitmap) {
-	return JEMALLOC_INTERNAL_FFSLL(bitmap);
+ffs_llu(unsigned long long x) {
+	util_assume(x != 0);
+	return JEMALLOC_INTERNAL_FFSLL(x) - 1;
 }
 
 BIT_UTIL_INLINE unsigned
-ffs_lu(unsigned long bitmap) {
-	return JEMALLOC_INTERNAL_FFSL(bitmap);
+ffs_lu(unsigned long x) {
+	util_assume(x != 0);
+	return JEMALLOC_INTERNAL_FFSL(x) - 1;
 }
 
 BIT_UTIL_INLINE unsigned
-ffs_u(unsigned bitmap) {
-	return JEMALLOC_INTERNAL_FFS(bitmap);
+ffs_u(unsigned x) {
+	util_assume(x != 0);
+	return JEMALLOC_INTERNAL_FFS(x) - 1;
 }
 
 #ifdef JEMALLOC_INTERNAL_POPCOUNTL
@@ -41,7 +50,8 @@ popcount_lu(unsigned long bitmap) {
 
 BIT_UTIL_INLINE size_t
 cfs_lu(unsigned long* bitmap) {
-	size_t bit = ffs_lu(*bitmap) - 1;
+	util_assume(*bitmap != 0);
+	size_t bit = ffs_lu(*bitmap);
 	*bitmap ^= ZU(1) << bit;
 	return bit;
 }
@@ -209,7 +219,7 @@ lg_floor(size_t x) {
 		return (8 << LG_SIZEOF_PTR) - 1;
 	}
 	x++;
-	return ffs_zu(x) - 2;
+	return ffs_zu(x) - 1;
 }
 #endif
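The semantic change is easiest to see in isolation. The standalone sketch below mimics the new 0-based contract on top of GCC/Clang's __builtin_ffsll, which (like POSIX ffs) is 1-based and returns 0 for a zero input; the name demo_ffs_llu and the plain assert standing in for util_assume are illustrative, not jemalloc's.

#include <assert.h>
#include <stdio.h>

/*
 * 0-based find-first-set: returns the position of the lowest set bit.
 * The caller must guarantee x != 0 (mirroring util_assume(x != 0)).
 */
static unsigned
demo_ffs_llu(unsigned long long x) {
	assert(x != 0);
	/* __builtin_ffsll is 1-based, so subtract 1 for 0-based indexing. */
	return (unsigned)__builtin_ffsll((long long)x) - 1;
}

int
main(void) {
	assert(demo_ffs_llu(0x1) == 0);  /* bit 0 */
	assert(demo_ffs_llu(0x8) == 3);  /* the old 1-based API returned 4 */
	assert(demo_ffs_llu(0xc0) == 6); /* lowest of bits 6 and 7 */
	printf("0-based ffs semantics verified\n");
	return 0;
}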

include/jemalloc/internal/bitmap.h

@@ -272,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
 			}
 			return bitmap_ffu(bitmap, binfo, sib_base);
 		}
-		bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
+		bit += ((size_t)ffs_lu(group_masked)) <<
 		    (lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
 	}
 	assert(bit >= min_bit);
@@ -284,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
 	    - 1);
 	size_t bit;
 	do {
-		bit = ffs_lu(g);
-		if (bit != 0) {
-			return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+		if (g != 0) {
+			bit = ffs_lu(g);
+			return (i << LG_BITMAP_GROUP_NBITS) + bit;
 		}
 		i++;
 		g = bitmap[i];
@@ -307,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
 #ifdef BITMAP_USE_TREE
 	i = binfo->nlevels - 1;
 	g = bitmap[binfo->levels[i].group_offset];
-	bit = ffs_lu(g) - 1;
+	bit = ffs_lu(g);
 	while (i > 0) {
 		i--;
 		g = bitmap[binfo->levels[i].group_offset + bit];
-		bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+		bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
 	}
 #else
 	i = 0;
 	g = bitmap[0];
-	while ((bit = ffs_lu(g)) == 0) {
+	while (g == 0) {
 		i++;
 		g = bitmap[i];
 	}
-	bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+	bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
 #endif
 	bitmap_set(bitmap, binfo, bit);
 	return bit;
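The bitmap changes show the main payoff of the new contract: with 0-based indexing there is no zero sentinel, so scanning loops test the group against zero directly instead of round-tripping through ffs. Below is a minimal standalone sketch of the non-tree scan pattern that bitmap_sfu now uses, with __builtin_ffsl as a stand-in for ffs_lu; the demo_* names are illustrative only.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define GROUP_NBITS (sizeof(unsigned long) * CHAR_BIT)

/* 0-based ffs wrapper; the caller must ensure g != 0. */
static unsigned
demo_ffs_lu(unsigned long g) {
	assert(g != 0);
	return (unsigned)__builtin_ffsl((long)g) - 1;
}

/*
 * Index of the first set bit across an array of groups. With 0-based
 * ffs there is no sentinel, so the loop tests g == 0 directly.
 * Assumes at least one bit is set somewhere in the array.
 */
static size_t
demo_first_set(const unsigned long *groups) {
	size_t i = 0;
	unsigned long g = groups[0];
	while (g == 0) {
		i++;
		g = groups[i];
	}
	return i * GROUP_NBITS + demo_ffs_lu(g);
}

int
main(void) {
	unsigned long groups[3] = {0, 0, 1UL << 5};
	/* 2 * 64 + 5 = 133 on an LP64 platform. */
	printf("first set bit: %zu\n", demo_first_set(groups));
	return 0;
}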

include/jemalloc/internal/prng.h

@@ -136,7 +136,7 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
 	assert(range > 1);
 
 	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
+	lg_range = ffs_u32(pow2_ceil_u32(range));
 
 	/* Generate a result in [0..range) via repeated trial. */
 	do {
@@ -154,7 +154,7 @@ prng_range_u64(uint64_t *state, uint64_t range) {
 	assert(range > 1);
 
 	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+	lg_range = ffs_u64(pow2_ceil_u64(range));
 
 	/* Generate a result in [0..range) via repeated trial. */
 	do {
@@ -172,7 +172,7 @@ prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
 	assert(range > 1);
 
 	/* Compute the ceiling of lg(range). */
-	lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+	lg_range = ffs_u64(pow2_ceil_u64(range));
 
 	/* Generate a result in [0..range) via repeated trial. */
 	do {
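The prng hunks all rely on the same identity: for range > 1, pow2_ceil(range) is a power of two no smaller than range, and the 0-based index of its single set bit is exactly ceil(lg(range)), so the old "- 1" correction disappears. A quick standalone check of that identity; demo_pow2_ceil_32 is a hypothetical re-implementation in the spirit of jemalloc's pow2_ceil_u32, not its actual code.

#include <stdint.h>
#include <stdio.h>

/* Round up to the next power of two (bit-smearing trick, 32-bit). */
static uint32_t
demo_pow2_ceil_32(uint32_t x) {
	x--;
	x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
	x |= x >> 8;  x |= x >> 16;
	return x + 1;
}

int
main(void) {
	/*
	 * ceil(lg(range)) is the 0-based index of the single set bit in
	 * pow2_ceil(range); e.g. range=3 -> pow2_ceil=4 -> index 2.
	 */
	for (uint32_t range = 2; range <= 9; range++) {
		unsigned lg = (unsigned)__builtin_ffs(
		    (int)demo_pow2_ceil_32(range)) - 1;
		printf("range=%u ceil(lg(range))=%u\n", range, lg);
	}
	return 0;
}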

src/pages.c

@@ -211,8 +211,8 @@ pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
 		flags |= MAP_FIXED | MAP_EXCL;
 	} else {
 		unsigned alignment_bits = ffs_zu(alignment);
-		assert(alignment_bits > 1);
-		flags |= MAP_ALIGNED(alignment_bits - 1);
+		assert(alignment_bits > 0);
+		flags |= MAP_ALIGNED(alignment_bits);
 	}
 
 	void *ret = mmap(addr, size, prot, flags, -1, 0);
@@ -600,7 +600,7 @@ init_thp_state(void) {
 #endif
 	if (nread < 0) {
-		goto label_error; 
+		goto label_error;
 	}
 
 	if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
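FreeBSD's MAP_ALIGNED(n) expects n = lg2(alignment). Since pages_map is only called with power-of-two alignments, the 0-based ffs of the alignment value is that logarithm directly, which removes another "- 1". A small sanity check of the identity, using __builtin_ffsll in place of the configure-selected ffs_zu:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int
main(void) {
	/*
	 * For a power-of-two alignment, the 0-based ffs of the value is
	 * exactly lg2(alignment); the old 1-based API needed "- 1" here.
	 */
	size_t alignment = 4096;
	unsigned bits = (unsigned)__builtin_ffsll((long long)alignment) - 1;
	assert((size_t)1 << bits == alignment);
	printf("lg2(%zu) = %u\n", alignment, bits);
	return 0;
}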

test/unit/bit_util.c

@@ -101,11 +101,63 @@ TEST_BEGIN(test_lg_ceil_floor) {
 }
 TEST_END
 
+#define TEST_FFS(t, suf, test_suf, pri) do {				\
+	for (unsigned i = 0; i < sizeof(t) * 8; i++) {			\
+		for (unsigned j = 0; j <= i; j++) {			\
+			for (unsigned k = 0; k <= j; k++) {		\
+				t x = (t)1 << i;			\
+				x |= (t)1 << j;				\
+				x |= (t)1 << k;				\
+				expect_##test_suf##_eq(ffs_##suf(x), k,	\
+				    "Unexpected result, x=%"pri, x);	\
+			}						\
+		}							\
+	}								\
+} while(0)
+
+TEST_BEGIN(test_ffs_u) {
+	TEST_FFS(unsigned, u, u, "u");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_lu) {
+	TEST_FFS(unsigned long, lu, lu, "lu");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_llu) {
+	TEST_FFS(unsigned long long, llu, qd, "llu");
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_u32) {
+	TEST_FFS(uint32_t, u32, u32, FMTu32);
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_u64) {
+	TEST_FFS(uint64_t, u64, u64, FMTu64);
+}
+TEST_END
+
+TEST_BEGIN(test_ffs_zu) {
+	TEST_FFS(size_t, zu, zu, "zu");
+}
+TEST_END
+
 int
 main(void) {
 	return test(
 		test_pow2_ceil_u64,
 		test_pow2_ceil_u32,
 		test_pow2_ceil_zu,
-		test_lg_ceil_floor);
+		test_lg_ceil_floor,
+		test_ffs_u,
+		test_ffs_lu,
+		test_ffs_llu,
+		test_ffs_u32,
+		test_ffs_u64,
+		test_ffs_zu);
 }
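A note on the macro's expected value: because the loops maintain k <= j <= i, bit k is always the lowest set bit of x = (1<<i)|(1<<j)|(1<<k), even when two or all three indices coincide, so ffs must return exactly k under the new 0-based contract. The same invariant, checked standalone for the unsigned case with __builtin_ffs standing in for ffs_u:

#include <assert.h>
#include <stdio.h>

int
main(void) {
	/*
	 * For any k <= j <= i, the lowest set bit of
	 * (1<<i)|(1<<j)|(1<<k) is k; this is the invariant TEST_FFS
	 * exercises exhaustively for each ffs variant.
	 */
	for (unsigned i = 0; i < 32; i++) {
		for (unsigned j = 0; j <= i; j++) {
			for (unsigned k = 0; k <= j; k++) {
				unsigned x = (1u << i) | (1u << j) | (1u << k);
				assert((unsigned)__builtin_ffs((int)x) - 1
				    == k);
			}
		}
	}
	printf("TEST_FFS invariant holds\n");
	return 0;
}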