/* We use 0x1 as deleted marker. */
#define HTABLE_DELETED (0x1)
/* A perfect_bitnum equal to the top bit index of uintptr_t (63 on
 * 64-bit platforms) means there is no perfect bit: ht_perfect_mask()
 * then shifts the bit out entirely, yielding a zero mask. */
#define NO_PERFECT_BIT (sizeof(uintptr_t) * CHAR_BIT - 1)
+
/* Default allocator hook: zeroed memory, so every new bucket reads as
 * empty (0).  The ht parameter is unused here; the hook signature lets
 * custom allocators consult the table — TODO confirm against header. */
static void *htable_default_alloc(struct htable *ht, size_t len)
{
	/* Removed an unreachable statement that followed this return
	 * (a stray fragment of another function's body). */
	return calloc(len, 1);
}
+static inline uintptr_t ht_perfect_mask(const struct htable *ht)
+{
+ return (uintptr_t)2 << ht->perfect_bitnum;
+}
+
/* Extract the hash bits we stash in a pointer's common bits (the
 * perfect bit is kept out of this, it is managed separately). */
static inline uintptr_t get_hash_ptr_bits(const struct htable *ht,
					  size_t hash)
{
	/* Shuffling the wanted hash bits down to the low
	 * end is quite expensive.  But the lower bits are redundant, so
	 * we fold the value first. */
	return (hash ^ (hash >> ht->bits))
		& ht->common_mask & ~ht_perfect_mask(ht);
}
void htable_init(struct htable *ht,
*ht = empty;
ht->rehash = rehash;
ht->priv = priv;
- ht->table = &ht->perfect_bit;
+ ht->table = &ht->common_bits;
+}
+
+static inline size_t ht_max(const struct htable *ht)
+{
+ return ((size_t)3 << ht->bits) / 4;
}
-/* We've changed ht->bits, update ht->max and ht->max_with_deleted */
-static void htable_adjust_capacity(struct htable *ht)
+static inline size_t ht_max_with_deleted(const struct htable *ht)
{
- ht->max = ((size_t)3 << ht->bits) / 4;
- ht->max_with_deleted = ((size_t)9 << ht->bits) / 10;
+ return ((size_t)9 << ht->bits) / 10;
}
bool htable_init_sized(struct htable *ht,
ht->table = htable_alloc(ht, sizeof(size_t) << ht->bits);
if (!ht->table) {
- ht->table = &ht->perfect_bit;
+ ht->table = &ht->common_bits;
return false;
}
- htable_adjust_capacity(ht);
(void)htable_debug(ht, HTABLE_LOC);
return true;
}
/* Empty the table and return it to its freshly-initialized state,
 * releasing the bucket array if one was actually allocated. */
void htable_clear(struct htable *ht)
{
	/* ht->table points at &ht->common_bits when no real array was
	 * ever allocated; only free a genuine allocation. */
	if (ht->table != &ht->common_bits)
		htable_free(ht, (void *)ht->table);

	htable_init(ht, ht->rehash, ht->priv);
}
struct htable_iter *i, size_t hash)
{
i->off = hash_bucket(ht, hash);
- return htable_val(ht, i, hash, ht->perfect_bit);
+ return htable_val(ht, i, hash, ht_perfect_mask(ht));
}
void *htable_nextval_(const struct htable *ht,
static void ht_add(struct htable *ht, const void *new, size_t h)
{
size_t i;
- uintptr_t perfect = ht->perfect_bit;
+ uintptr_t perfect = ht_perfect_mask(ht);
i = hash_bucket(ht, h);
return false;
}
ht->bits++;
- htable_adjust_capacity(ht);
/* If we lost our "perfect bit", get it back now. */
- if (!ht->perfect_bit && ht->common_mask) {
+ if (ht->perfect_bitnum == NO_PERFECT_BIT && ht->common_mask) {
for (i = 0; i < sizeof(ht->common_mask) * CHAR_BIT; i++) {
- if (ht->common_mask & ((size_t)1 << i)) {
- ht->perfect_bit = (size_t)1 << i;
+ if (ht->common_mask & ((size_t)2 << i)) {
+ ht->perfect_bitnum = i;
break;
}
}
}
- if (oldtable != &ht->perfect_bit) {
+ if (oldtable != &ht->common_bits) {
for (i = 0; i < oldnum; i++) {
if (entry_is_valid(e = oldtable[i])) {
void *p = get_raw_ptr(ht, e);
static COLD void rehash_table(struct htable *ht)
{
size_t start, i;
- uintptr_t e;
+ uintptr_t e, perfect = ht_perfect_mask(ht);
/* Beware wrap cases: we need to start from first empty bucket. */
for (start = 0; ht->table[start]; start++);
continue;
if (e == HTABLE_DELETED)
ht->table[h] = 0;
- else if (!(e & ht->perfect_bit)) {
+ else if (!(e & perfect)) {
void *p = get_raw_ptr(ht, e);
ht->table[h] = 0;
ht_add(ht, p, ht->rehash(p, ht->priv));
ht->common_mask = ~((uintptr_t)1 << i);
ht->common_bits = ((uintptr_t)p & ht->common_mask);
- ht->perfect_bit = 1;
+ ht->perfect_bitnum = 0;
(void)htable_debug(ht, HTABLE_LOC);
return;
}
/* Take away those bits from our mask, bits and perfect bit. */
ht->common_mask &= ~maskdiff;
ht->common_bits &= ~maskdiff;
- ht->perfect_bit &= ~maskdiff;
+ if (ht_perfect_mask(ht) & maskdiff)
+ ht->perfect_bitnum = NO_PERFECT_BIT;
(void)htable_debug(ht, HTABLE_LOC);
}
bool htable_add_(struct htable *ht, size_t hash, const void *p)
{
- if (ht->elems+1 > ht->max && !double_table(ht))
+ if (ht->elems+1 > ht_max(ht) && !double_table(ht))
return false;
- if (ht->elems+1 + ht->deleted > ht->max_with_deleted)
+ if (ht->elems+1 + ht->deleted > ht_max_with_deleted(ht))
rehash_table(ht);
assert(p);
if (((uintptr_t)p & ht->common_mask) != ht->common_bits)