-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
-{
- return tdb->header.v.hash_off
- + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
- * sizeof(tdb_off_t));
-}
-
/* Returns 0 if the entry is a zero (definitely not a match).
 * Returns a valid entry offset if it's a match. Fills in rec.
 * Otherwise returns TDB_OFF_ERR: keep searching.
 *
 * 'hash' is only consulted by the disabled extra-hash-bits check below;
 * currently the match is decided by key length and a full key compare. */
static tdb_off_t entry_matches(struct tdb_context *tdb,
			       uint64_t list,
			       uint64_t hash,
			       const struct tdb_data *key,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;
	uint64_t keylen;
	const unsigned char *rkey;

	/* Fetch the record offset stored in this bucket.  NOTE(review):
	 * unlike hash_off(), 'list' is NOT masked here, so the caller
	 * presumably passes an already in-range bucket number — verify. */
	off = tdb_read_off(tdb, tdb->header.v.hash_off
			   + list * sizeof(tdb_off_t));
	if (off == 0 || off == TDB_OFF_ERR)
		return off;

#if 0 /* FIXME: Check other bits. */
	unsigned int bits, bitmask, hoffextra;
	/* Bottom three bits show how many extra hash bits. */
	bits = (off & ((1 << TDB_EXTRA_HASHBITS_NUM) - 1)) + 1;
	bitmask = (1 << bits)-1;
	hoffextra = ((off >> TDB_EXTRA_HASHBITS_NUM) & bitmask);
	uint64_t hextra = hash >> tdb->header.v.hash_bits;
	if ((hextra & bitmask) != hoffextra)
		return TDB_OFF_ERR;
	off &= ~...;
#endif

	/* Read the record header (endian-converted) into *rec. */
	if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
		return TDB_OFF_ERR;

	/* FIXME: check extra bits in header! */
	/* Cheap rejection first: a match must have the same key length. */
	keylen = rec_key_length(rec);
	if (keylen != key->dsize)
		return TDB_OFF_ERR;

	/* The key bytes are stored immediately after the record header.
	 * tdb_access_read() may hand back a mapped pointer or a copy;
	 * either way it must be paired with tdb_access_release(). */
	rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen);
	if (!rkey)
		return TDB_OFF_ERR;
	if (memcmp(rkey, key->dptr, keylen) != 0)
		off = TDB_OFF_ERR;
	tdb_access_release(tdb, rkey);
	return off;
}
-
-/* FIXME: Optimize? */
-static void unlock_range(struct tdb_context *tdb,
- tdb_off_t list, tdb_len_t num,
- int ltype)
-{
- tdb_off_t i;
-
- for (i = list; i < list + num; i++)
- tdb_unlock_list(tdb, i, ltype);
-}
-
-/* FIXME: Optimize? */
-static int lock_range(struct tdb_context *tdb,
- tdb_off_t list, tdb_len_t num,
- int ltype)
-{
- tdb_off_t i;
-
- for (i = list; i < list + num; i++) {
- if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != 0) {
- unlock_range(tdb, list, i - list, ltype);
- return -1;
- }
- }
- return 0;
-}
-
/* We lock hashes up to the next empty offset. We already hold the
 * lock on the start bucket, but we may need to release and re-grab
 * it. If we fail, we hold no locks at all!
 *
 * Returns the number of buckets locked, or TDB_OFF_ERR on lock
 * failure.  May wrap around the end of the hash table, in which case
 * locks are held on a prefix [0..pre_locks) as well as [start..end). */
static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
				     tdb_off_t start, int ltype)
{
	tdb_len_t num, len, pre_locks;

again:
	/* Total number of hash buckets. */
	num = 1ULL << tdb->header.v.hash_bits;
	/* Distance from 'start' to the first zero bucket, scanning
	 * forward without wrapping. */
	len = tdb_find_zero_off(tdb, hash_off(tdb, start), num - start);
	if (unlikely(len == num - start)) {
		/* We hit the end of the hash range. Drop lock: we have
		   to lock start of hash first. */
		tdb_unlock_list(tdb, start, ltype);
		/* Grab something, so header is stable. */
		if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
			return TDB_OFF_ERR;
		/* Re-scan from the table start for the first zero bucket. */
		len = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
		if (lock_range(tdb, 1, len, ltype) == -1) {
			tdb_unlock_list(tdb, 0, ltype);
			return TDB_OFF_ERR;
		}
		/* Buckets [0..len] are now locked (bucket 0 separately). */
		pre_locks = len;
		len = num - start;
	} else {
		/* We already have lock on start. */
		/* NOTE(review): 'start' is mutated here, so a later
		 * 'goto again' retries from start+1, not the caller's
		 * original bucket — confirm this is intended. */
		start++;
		pre_locks = 0;
	}
	if (unlikely(lock_range(tdb, start, len, ltype) == -1)) {
		if (pre_locks)
			unlock_range(tdb, 0, pre_locks, ltype);
		else
			tdb_unlock_list(tdb, start, ltype);
		return TDB_OFF_ERR;
	}

	/* Now, did we lose the race, and it's not zero any more? */
	/* NOTE(review): 'pre_locks + len' is a lock COUNT, not an
	 * absolute bucket index from 'start'; in the non-wrapped case
	 * this re-reads bucket 'len' rather than the zero bucket found
	 * by the scan above — looks wrong, verify against upstream. */
	if (unlikely(tdb_read_off(tdb, hash_off(tdb, pre_locks + len)) != 0)) {
		unlock_range(tdb, 0, pre_locks, ltype);
		/* Leave the start locked, as expected. */
		unlock_range(tdb, start + 1, len - 1, ltype);
		goto again;
	}

	return pre_locks + len;
}
-