avoid unnecessarily mixing 32-bit and 64-bit ints

It's ever so slightly faster to stick to 64-bit arithmetic and
it avoids clang tidy being unhappy about the implicit widening.
pull/166/head
Daniel Micay 2022-01-03 00:54:43 -05:00
parent 3f8e9d3184
commit 8ae78237ae
1 changed file with 4 additions and 4 deletions


@@ -359,11 +359,11 @@ static u64 get_mask(size_t slots) {
 static size_t get_free_slot(struct random_state *rng, size_t slots, const struct slab_metadata *metadata) {
     if (SLOT_RANDOMIZE) {
         // randomize start location for linear search (uniform random choice is too slow)
-        unsigned random_index = get_random_u16_uniform(rng, slots);
-        unsigned first_bitmap = random_index / 64;
+        size_t random_index = get_random_u16_uniform(rng, slots);
+        size_t first_bitmap = random_index / 64;
         u64 random_split = ~(~0UL << (random_index - first_bitmap * 64));
-        unsigned i = first_bitmap;
+        size_t i = first_bitmap;
         u64 masked = metadata->bitmap[i];
         masked |= random_split;
         for (;;) {
@@ -379,7 +379,7 @@ static size_t get_free_slot(struct random_state *rng, size_t slots, const struct slab_metadata *metadata) {
             masked = metadata->bitmap[i];
         }
     } else {
-        for (unsigned i = 0; i <= (slots - 1) / 64; i++) {
+        for (size_t i = 0; i <= (slots - 1) / 64; i++) {
             u64 masked = metadata->bitmap[i];
             if (i == (slots - 1) / 64) {
                 masked |= get_mask(slots - i * 64);
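
As a rough illustration of the clang-tidy complaint the commit message refers to (this snippet is hypothetical and not part of the repository): with a 32-bit index, an expression like i * 64 is evaluated in 32-bit arithmetic and only then implicitly widened when it meets a size_t operand, which clang-tidy's widening checks flag; with size_t the whole expression stays 64-bit.

#include <stddef.h>

/* Before: i * 64 is computed as a 32-bit unsigned value and then
 * implicitly widened to 64 bits for the subtraction from slots. */
static size_t remaining_before(size_t slots, unsigned i) {
    return slots - i * 64;
}

/* After: i already has the same width as slots, so the multiplication
 * and subtraction happen entirely in 64-bit arithmetic. */
static size_t remaining_after(size_t slots, size_t i) {
    return slots - i * 64;
}

Keeping the index as size_t also spares the compiler from re-widening it on each loop iteration, which is presumably the "ever so slightly faster" part of the commit message.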