-static int hash_add(struct tdb_context *tdb,
- uint64_t hash, tdb_off_t off)
-{
- tdb_off_t i, hoff, len, num;
-
- /* Look for next space. */
- i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
- len = (1ULL << tdb->header.v.hash_bits) - i;
- num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
-
- if (unlikely(num == len)) {
- /* We wrapped. Look through start of hash table. */
- i = 0;
- hoff = hash_off(tdb, 0);
- len = (1ULL << tdb->header.v.hash_bits);
- num = tdb_find_zero_off(tdb, hoff, len);
- if (num == len) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "hash_add: full hash table!\n");
- return -1;
- }
- }
- if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "hash_add: overwriting hash table?\n");
- return -1;
- }
-
- /* FIXME: Encode extra hash bits! */
- return tdb_write_off(tdb, hash_off(tdb, i + num), off);
-}
-
/* Double the hash table (hash_bits + 1 => twice the buckets) and rehash
 * every existing entry into the new array.  Runs under the allrecord
 * write lock.  If we fail, others will try after us: on any error the
 * old header values are restored (see the `oldheader` label) and we
 * simply return. */
static void enlarge_hash(struct tdb_context *tdb)
{
	tdb_off_t newoff, oldoff, i;
	tdb_len_t hlen;
	/* Bucket count at entry; also used to detect a concurrent enlarge. */
	uint64_t num = 1ULL << tdb->header.v.hash_bits;
	struct tdb_used_record pad, *r;
	unsigned int records = 0;

	/* FIXME: We should do this without holding locks throughout. */
	if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
		return;

	/* Someone else enlarged for us?  Nothing to do. */
	if ((1ULL << tdb->header.v.hash_bits) != num)
		goto unlock;

again:
	/* Allocate our new array: room for 2 * num bucket offsets. */
	hlen = num * sizeof(tdb_off_t) * 2;
	newoff = alloc(tdb, 0, hlen, 0, false);
	if (unlikely(newoff == TDB_OFF_ERR))
		goto unlock;
	if (unlikely(newoff == 0)) {
		/* No free record big enough: grow the file, then retry. */
		if (tdb_expand(tdb, 0, hlen, false) == -1)
			goto unlock;
		goto again;
	}
	/* Step over record header! */
	newoff += sizeof(struct tdb_used_record);

	/* Starts all zero. */
	if (zero_out(tdb, newoff, hlen) == -1)
		goto unlock;

	/* Update header now so we can use normal routines (hash_add below
	 * reads hash_bits/hash_off from the header). */
	oldoff = tdb->header.v.hash_off;

	tdb->header.v.hash_bits++;
	tdb->header.v.hash_off = newoff;

	/* FIXME: If the space before is empty, we know this is in its ideal
	 * location.  Or steal a bit from the pointer to avoid rehash. */
	for (i = 0; i < num; i++) {
		tdb_off_t off;
		/* Re-insert every occupied bucket of the old table. */
		off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
		if (unlikely(off == TDB_OFF_ERR))
			goto oldheader;
		if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
			goto oldheader;
		if (off)
			records++;
	}

	tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
		 "enlarge_hash: moved %u records from %llu buckets.\n",
		 records, (long long)num);

	/* Free up old hash. */
	r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
	if (!r)
		goto oldheader;
	add_free_record(tdb, rec_zone_bits(r), oldoff - sizeof(*r),
			sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));

	/* Now we write the modified header. */
	write_header(tdb);
unlock:
	tdb_allrecord_unlock(tdb, F_WRLCK);
	return;

oldheader:
	/* Rehash failed part-way: roll the in-memory header back to the
	 * old table.  NOTE(review): the newly allocated array appears to
	 * be left allocated on this path — confirm this leak is an
	 * accepted trade-off on the error path. */
	tdb->header.v.hash_bits--;
	tdb->header.v.hash_off = oldoff;
	goto unlock;
}
-
-
-/* This is the slow version of the routine which searches the
- * hashtable for an entry.
- * We lock every hash bucket up to and including the next zero one.
- */
-static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
- struct tdb_data key,
- uint64_t h,
- int ltype,
- tdb_off_t *start_lock,
- tdb_len_t *num_locks,
- tdb_off_t *bucket,
- struct tdb_used_record *rec)
-{
- /* Warning: this may drop the lock on *bucket! */
- *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
- if (*num_locks == TDB_OFF_ERR)
- return TDB_OFF_ERR;
-
- for (*bucket = *start_lock;
- *bucket < *start_lock + *num_locks;
- (*bucket)++) {
- tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
- /* Empty entry or we found it? */
- if (off == 0 || off != TDB_OFF_ERR)
- return off;
- }
-
- /* We didn't find a zero entry? Something went badly wrong... */
- unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
- tdb->ecode = TDB_ERR_CORRUPT;
- tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
- "find_and_lock: expected to find an empty hash bucket!\n");
- return TDB_OFF_ERR;
-}
-
-/* This is the core routine which searches the hashtable for an entry.
- * On error, no locks are held and TDB_OFF_ERR is returned.
- * Otherwise, *num_locks locks of type ltype from *start_lock are held.
- * The bucket where the entry is (or would be) is in *bucket.
- * If not found, the return value is 0.
- * If found, the return value is the offset, and *rec is the record. */
-static tdb_off_t find_and_lock(struct tdb_context *tdb,
- struct tdb_data key,
- uint64_t h,
- int ltype,
- tdb_off_t *start_lock,
- tdb_len_t *num_locks,
- tdb_off_t *bucket,
- struct tdb_used_record *rec)
-{
- tdb_off_t off;
-
- /* FIXME: can we avoid locks for some fast paths? */
- *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
- if (*start_lock == TDB_OFF_ERR)
- return TDB_OFF_ERR;
-
- /* Fast path. */
- off = entry_matches(tdb, *start_lock, h, &key, rec);
- if (likely(off != TDB_OFF_ERR)) {
- *bucket = *start_lock;
- *num_locks = 1;
- return off;
- }
-
- /* Slow path, need to grab more locks and search. */
- return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
- bucket, rec);
-}
-
-/* Returns -1 on error, 0 on OK, 1 on "expand and retry." */