+ return;
+
+oldheader:
+ tdb->header.v.hash_bits--;
+ tdb->header.v.hash_off = oldoff;
+ goto unlock;
+}
+
+
+/* This is the slow version of the routine which searches the
+ * hashtable for an entry.
+ * We lock every hash bucket up to and including the next zero
+ * (empty) one.
+ */
+static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
+ struct tdb_data key,
+ uint64_t h,
+ int ltype,
+ tdb_off_t *start_lock,
+ tdb_len_t *num_locks,
+ tdb_off_t *bucket,
+ struct tdb_used_record *rec)
+{
+ /* Warning: this may drop the lock on *bucket! */
+ *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+ if (*num_locks == TDB_OFF_ERR)
+ return TDB_OFF_ERR;
+
+ for (*bucket = *start_lock;
+ *bucket < *start_lock + *num_locks;
+ (*bucket)++) {
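+ /* entry_matches returns the record offset on a match, 0 for
+  * an empty bucket, or TDB_OFF_ERR for a non-matching entry. */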
+ tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
+ /* Empty entry or we found it? */
+ if (off == 0 || off != TDB_OFF_ERR)
+ return off;
+ }
+
+ /* We didn't find a zero entry? Something went badly wrong... */
+ unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
+ tdb->ecode = TDB_ERR_CORRUPT;
+ tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+ "find_and_lock: expected to find an empty hash bucket!\n");
+ return TDB_OFF_ERR;
+}
+
+/* This is the core routine which searches the hashtable for an entry.
+ * On error, no locks are held and TDB_OFF_ERR is returned.
+ * Otherwise, *num_locks locks of type ltype from *start_lock are held.
+ * The bucket where the entry is (or would be) is in *bucket.
+ * If not found, the return value is 0.
+ * If found, the return value is the offset, and *rec is the record. */
+static tdb_off_t find_and_lock(struct tdb_context *tdb,
+ struct tdb_data key,
+ uint64_t h,
+ int ltype,
+ tdb_off_t *start_lock,
+ tdb_len_t *num_locks,
+ tdb_off_t *bucket,
+ struct tdb_used_record *rec)
+{
+ tdb_off_t off;
+
+ /* FIXME: can we avoid locks for some fast paths? */
+ *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
+ if (*start_lock == TDB_OFF_ERR)
+ return TDB_OFF_ERR;
+
+ /* Fast path. */
+ off = entry_matches(tdb, *start_lock, h, &key, rec);
+ if (likely(off != TDB_OFF_ERR)) {
+ *bucket = *start_lock;
+ *num_locks = 1;
+ return off;
+ }
+
+ /* Slow path, need to grab more locks and search. */
+ return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
+ bucket, rec);
+}
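+
+/* Illustrative caller pattern (a sketch only; real call sites may
+ * differ in detail):
+ *
+ *	off = find_and_lock(tdb, key, h, F_RDLCK,
+ *			    &start, &num, &bucket, &rec);
+ *	if (off == TDB_OFF_ERR)
+ *		return -1;
+ *	... off == 0 means not found, else read the record at off ...
+ *	unlock_lists(tdb, start, num, F_RDLCK);
+ *
+ * where h is the key's hash value, computed by the caller. */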
+
+/* Returns -1 on error, 0 on OK" */
+static int replace_data(struct tdb_context *tdb,
+ uint64_t h, struct tdb_data key, struct tdb_data dbuf,
+ tdb_off_t bucket,
+ tdb_off_t old_off, tdb_len_t old_room,
+ unsigned old_zone,
+ bool growing)
+{
+ tdb_off_t new_off;
+
+ /* Allocate a new record. */
+ new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
+ if (unlikely(new_off == TDB_OFF_ERR))
+ return -1;
+
+ /* We didn't like the existing one: return the entire old record
+  * (header, key and the data room it had) to the free list. */
+ if (old_off)
+ add_free_record(tdb, old_zone, old_off,
+ sizeof(struct tdb_used_record)
+ + key.dsize + old_room);
+
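+ /* Point the hash bucket at the new record. */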
+ /* FIXME: Encode extra hash bits! */
+ if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
+ return -1;
+
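+ /* The key is written immediately after the record header,
+  * then the data immediately after the key. */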
+ new_off += sizeof(struct tdb_used_record);
+ if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
+ return -1;
+
+ new_off += key.dsize;
+ if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
+ return -1;
+
+ /* FIXME: tdb_increment_seqnum(tdb); */
+ return 0;