git.ozlabs.org Git - ccan/blobdiff - ccan/tdb2/tdb.c
tdb2: fix valgrind warnings.
[ccan] / ccan / tdb2 / tdb.c
index d19ceda239d50e71681607d39487d19187b61ac9..7544d7a1e13ef1bcea9d13ece2a4a8e01be7a3f0 100644 (file)
@@ -147,7 +147,7 @@ static int tdb_new_database(struct tdb_context *tdb)
                                         sizeof(newdb.hdr.hash_test),
                                         newdb.hdr.hash_seed,
                                         tdb->hash_priv);
-
+       memset(newdb.hdr.reserved, 0, sizeof(newdb.hdr.reserved));
        newdb.hdr.v.generation = 0;
 
        /* The initial zone must cover the initial database size! */
@@ -394,7 +394,7 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
        return NULL;
 }
 
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
+tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
 {
        return tdb->header.v.hash_off
                + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
@@ -549,13 +549,48 @@ static int update_rec_hdr(struct tdb_context *tdb,
        return tdb_write_convert(tdb, off, rec, sizeof(*rec));
 }
 
/* Add 'off' to the hash table at the first free bucket at or after its
 * ideal position (open addressing with linear probing, wrapping once).
 * Returns 0 on success, -1 (with tdb->ecode set) on error. */
static int hash_add(struct tdb_context *tdb,
		    uint64_t hash, tdb_off_t off)
{
	tdb_off_t i, hoff, len, num;

	/* Look for next space: scan from the ideal bucket to table end. */
	i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
	len = (1ULL << tdb->header.v.hash_bits) - i;
	num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);

	if (unlikely(num == len)) {
		/* We wrapped.  Look through start of hash table. */
		i = 0;
		hoff = hash_off(tdb, 0);
		len = (1ULL << tdb->header.v.hash_bits);
		num = tdb_find_zero_off(tdb, hoff, len);
		if (num == len) {
			/* No empty bucket anywhere: table is full. */
			tdb->ecode = TDB_ERR_CORRUPT;
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "hash_add: full hash table!\n");
			return -1;
		}
	}
	/* Sanity check: the bucket we are about to use must be empty. */
	if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
		tdb->ecode = TDB_ERR_CORRUPT;
		tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
			 "hash_add: overwriting hash table?\n");
		return -1;
	}

	/* FIXME: Encode extra hash bits! */
	return tdb_write_off(tdb, hash_off(tdb, i + num), off);
}
+
 /* If we fail, others will try after us. */
 static void enlarge_hash(struct tdb_context *tdb)
 {
        tdb_off_t newoff, oldoff, i;
        tdb_len_t hlen;
-       uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
+       uint64_t num = 1ULL << tdb->header.v.hash_bits;
        struct tdb_used_record pad, *r;
+       unsigned int records = 0;
 
        /* FIXME: We should do this without holding locks throughout. */
        if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
@@ -565,6 +600,7 @@ static void enlarge_hash(struct tdb_context *tdb)
        if ((1ULL << tdb->header.v.hash_bits) != num)
                goto unlock;
 
+again:
        /* Allocate our new array. */
        hlen = num * sizeof(tdb_off_t) * 2;
        newoff = alloc(tdb, 0, hlen, 0, false);
@@ -573,9 +609,7 @@ static void enlarge_hash(struct tdb_context *tdb)
        if (unlikely(newoff == 0)) {
                if (tdb_expand(tdb, 0, hlen, false) == -1)
                        goto unlock;
-               newoff = alloc(tdb, 0, hlen, 0, false);
-               if (newoff == TDB_OFF_ERR || newoff == 0)
-                       goto unlock;
+               goto again;
        }
        /* Step over record header! */
        newoff += sizeof(struct tdb_used_record);
@@ -584,90 +618,171 @@ static void enlarge_hash(struct tdb_context *tdb)
        if (zero_out(tdb, newoff, hlen) == -1)
                goto unlock;
 
+       /* Update header now so we can use normal routines. */
+       oldoff = tdb->header.v.hash_off;
+
+       tdb->header.v.hash_bits++;
+       tdb->header.v.hash_off = newoff;
+
        /* FIXME: If the space before is empty, we know this is in its ideal
         * location.  Or steal a bit from the pointer to avoid rehash. */
-       for (i = tdb_find_nonzero_off(tdb, hash_off(tdb, 0), num);
-            i < num;
-            i += tdb_find_nonzero_off(tdb, hash_off(tdb, i), num - i)) {
+       for (i = 0; i < num; i++) {
                tdb_off_t off;
-               off = tdb_read_off(tdb, hash_off(tdb, i));
+               off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
                if (unlikely(off == TDB_OFF_ERR))
-                       goto unlock;
-               if (unlikely(!off)) {
-                       tdb->ecode = TDB_ERR_CORRUPT;
-                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-                                "find_bucket_and_lock: zero hash bucket!\n");
-                       goto unlock;
-               }
-
-               /* Find next empty hash slot. */
-               for (h = hash_record(tdb, off);
-                    tdb_read_off(tdb, newoff + (h & ((num * 2)-1))
-                                 * sizeof(tdb_off_t)) != 0;
-                    h++);
-
-               /* FIXME: Encode extra hash bits! */
-               if (tdb_write_off(tdb, newoff + (h & ((num * 2)-1))
-                                 * sizeof(tdb_off_t), off) == -1)
-                       goto unlock;
-               i++;
+                       goto oldheader;
+               if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
+                       goto oldheader;
+               if (off)
+                       records++;
        }
 
+       tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
+                "enlarge_hash: moved %u records from %llu buckets.\n",
+                records, (long long)num);
+
        /* Free up old hash. */
-       oldoff = tdb->header.v.hash_off - sizeof(*r);
-       r = tdb_get(tdb, oldoff, &pad, sizeof(*r));
+       r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
        if (!r)
-               goto unlock;
-       add_free_record(tdb, oldoff,
+               goto oldheader;
+       add_free_record(tdb, oldoff - sizeof(*r),
                        sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));
 
        /* Now we write the modified header. */
-       tdb->header.v.hash_bits++;
-       tdb->header.v.hash_off = newoff;
        write_header(tdb);
 unlock:
        tdb_allrecord_unlock(tdb, F_WRLCK);
+       return;
+
+oldheader:
+       tdb->header.v.hash_bits--;
+       tdb->header.v.hash_off = oldoff;
+       goto unlock;
 }
 
-int tdb_store(struct tdb_context *tdb,
-             struct tdb_data key, struct tdb_data dbuf, int flag)
+
+/* This is the slow version of the routine which searches the
+ * hashtable for an entry.
+ * We lock every hash bucket up to and including the next zero one.
+ */
+static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
+                                   struct tdb_data key,
+                                   uint64_t h,
+                                   int ltype,
+                                   tdb_off_t *start_lock,
+                                   tdb_len_t *num_locks,
+                                   tdb_off_t *bucket,
+                                   struct tdb_used_record *rec)
 {
-       tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
-       struct tdb_used_record rec;
-       uint64_t h;
-       bool growing = false;
+       /* Warning: this may drop the lock on *bucket! */
+       *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+       if (*num_locks == TDB_OFF_ERR)
+               return TDB_OFF_ERR;
 
-       h = tdb_hash(tdb, key.dptr, key.dsize);
+       for (*bucket = *start_lock;
+            *bucket < *start_lock + *num_locks;
+            (*bucket)++) {
+               tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
+               /* Empty entry or we found it? */
+               if (off == 0 || off != TDB_OFF_ERR)
+                       return off;
+       }
+
+       /* We didn't find a zero entry?  Something went badly wrong... */
+       unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
+       tdb->ecode = TDB_ERR_CORRUPT;
+       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                "find_and_lock: expected to find an empty hash bucket!\n");
+       return TDB_OFF_ERR;
+}
+
/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, *num_locks locks of type ltype from *start_lock are held.
 * The bucket where the entry is (or would be) is in *bucket.
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
static tdb_off_t find_and_lock(struct tdb_context *tdb,
			       struct tdb_data key,
			       uint64_t h,
			       int ltype,
			       tdb_off_t *start_lock,
			       tdb_len_t *num_locks,
			       tdb_off_t *bucket,
			       struct tdb_used_record *rec)
{
	tdb_off_t off;

	/* FIXME: can we avoid locks for some fast paths? */
	*start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
	if (*start_lock == TDB_OFF_ERR)
		return TDB_OFF_ERR;

	/* Fast path: a single bucket lock decides it (either the entry is
	 * in its ideal bucket, or that bucket is empty). */
	off = entry_matches(tdb, *start_lock, h, &key, rec);
	if (likely(off != TDB_OFF_ERR)) {
		*bucket = *start_lock;
		*num_locks = 1;
		return off;
	}

	/* Slow path, need to grab more locks and search. */
	return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
				  bucket, rec);
}
 
-               /* Even if not found, this is where we put the new entry. */
-               old_bucket = i;
-       }
/* Allocate and write a fresh record for key/dbuf, hook it into 'bucket',
 * and free the old record (if any).
 * Returns -1 on error, 0 on OK, 1 on "expand and retry." */
static int replace_data(struct tdb_context *tdb,
			uint64_t h, struct tdb_data key, struct tdb_data dbuf,
			tdb_off_t bucket,
			tdb_off_t old_off, tdb_len_t old_room,
			bool growing)
{
	tdb_off_t new_off;

	/* Allocate a new record. */
	new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
	if (unlikely(new_off == TDB_OFF_ERR))
		return -1;

	/* No space left: tell the caller to tdb_expand() and retry. */
	if (unlikely(new_off == 0))
		return 1;

	/* We didn't like the existing one: remove it. */
	if (old_off)
		add_free_record(tdb, old_off,
				sizeof(struct tdb_used_record)
				+ key.dsize + old_room);

	/* FIXME: Encode extra hash bits! */
	if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
		return -1;

	/* Key then data follow the record header.
	 * NOTE(review): the bucket pointer above goes live before the key
	 * and data are written — confirm crash-consistency is handled by
	 * the caller's locking/transaction scheme. */
	new_off += sizeof(struct tdb_used_record);
	if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
		return -1;

	new_off += key.dsize;
	if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
		return -1;

	/* FIXME: tdb_increment_seqnum(tdb); */
	return 0;
}
+
+int tdb_store(struct tdb_context *tdb,
+             struct tdb_data key, struct tdb_data dbuf, int flag)
+{
+       tdb_off_t off, bucket, start, num;
+       tdb_len_t old_room = 0;
+       struct tdb_used_record rec;
+       uint64_t h;
+       int ret;
+
+       h = tdb_hash(tdb, key.dptr, key.dsize);
+       off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
+               return -1;
 
        /* Now we have lock on this hash bucket. */
        if (flag == TDB_INSERT) {
@@ -677,18 +792,22 @@ int tdb_store(struct tdb_context *tdb,
                }
        } else {
                if (off) {
-                       if (rec_data_length(&rec) + rec_extra_padding(&rec)
-                           >= dbuf.dsize) {
-                               new_off = off;
+                       old_room = rec_data_length(&rec)
+                               + rec_extra_padding(&rec);
+                       if (old_room >= dbuf.dsize) {
+                               /* Can modify in-place.  Easy! */
                                if (update_rec_hdr(tdb, off,
                                                   key.dsize, dbuf.dsize,
                                                   &rec, h))
                                        goto fail;
-                               goto write;
+                               if (tdb->methods->write(tdb, off + sizeof(rec)
+                                                       + key.dsize,
+                                                       dbuf.dptr, dbuf.dsize))
+                                       goto fail;
+                               unlock_lists(tdb, start, num, F_WRLCK);
+                               return 0;
                        }
                        /* FIXME: See if right record is free? */
-                       /* Hint to allocator that we've realloced. */
-                       growing = true;
                } else {
                        if (flag == TDB_MODIFY) {
                                /* if the record doesn't exist and we
@@ -700,202 +819,191 @@ int tdb_store(struct tdb_context *tdb,
                }
        }
 
-       /* Allocate a new record. */
-       new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
-       if (new_off == 0) {
-               unlock_lists(tdb, start, num_locks, F_WRLCK);
+       /* If we didn't use the old record, this implies we're growing. */
+       ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room, off != 0);
+       unlock_lists(tdb, start, num, F_WRLCK);
+
+       if (unlikely(ret == 1)) {
                /* Expand, then try again... */
-               if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
+               if (tdb_expand(tdb, key.dsize, dbuf.dsize, off != 0) == -1)
                        return -1;
                return tdb_store(tdb, key, dbuf, flag);
        }
 
-       /* We didn't like the existing one: remove it. */
-       if (off) {
-               add_free_record(tdb, off, sizeof(struct tdb_used_record)
-                               + rec_key_length(&rec)
-                               + rec_data_length(&rec)
-                               + rec_extra_padding(&rec));
-       }
-
-       /* FIXME: Encode extra hash bits! */
-       if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
-               goto fail;
-
-write:
-       off = new_off + sizeof(struct tdb_used_record);
-       if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
-               goto fail;
-       off += key.dsize;
-       if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
-               goto fail;
-
-       /* FIXME: tdb_increment_seqnum(tdb); */
-       unlock_lists(tdb, start, num_locks, F_WRLCK);
-
        /* FIXME: by simple simulation, this approximated 60% full.
         * Check in real case! */
-       if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 30))
+       if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
                enlarge_hash(tdb);
 
-       return 0;
+       return ret;
 
 fail:
-       unlock_lists(tdb, start, num_locks, F_WRLCK);
+       unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
 }
 
-struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
/* Append dbuf to the existing data for key, creating the record if it
 * does not exist.  Returns 0 on success, -1 on error (tdb->ecode set). */
int tdb_append(struct tdb_context *tdb,
	       struct tdb_data key, struct tdb_data dbuf)
{
	tdb_off_t off, bucket, start, num;
	struct tdb_used_record rec;
	tdb_len_t old_room = 0, old_dlen;
	uint64_t h;
	unsigned char *newdata;
	struct tdb_data new_dbuf;
	int ret;

	h = tdb_hash(tdb, key.dptr, key.dsize);
	off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
	if (unlikely(off == TDB_OFF_ERR))
		return -1;

	if (off) {
		old_dlen = rec_data_length(&rec);
		old_room = old_dlen + rec_extra_padding(&rec);

		/* Fast path: can append in place. */
		if (rec_extra_padding(&rec) >= dbuf.dsize) {
			if (update_rec_hdr(tdb, off, key.dsize,
					   old_dlen + dbuf.dsize, &rec, h))
				goto fail;

			/* Write the new data just past the old data. */
			off += sizeof(rec) + key.dsize + old_dlen;
			if (tdb->methods->write(tdb, off, dbuf.dptr,
						dbuf.dsize) == -1)
				goto fail;

			/* FIXME: tdb_increment_seqnum(tdb); */
			unlock_lists(tdb, start, num, F_WRLCK);
			return 0;
		}
		/* FIXME: Check right record free? */

		/* Slow path: concatenate old data + new data in memory,
		 * then store it as one fresh record. */
		newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
		if (!newdata) {
			tdb->ecode = TDB_ERR_OOM;
			/* NOTE(review): cast is (long long) but the format
			 * is %llu — confirm, should be (unsigned long long). */
			tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
				 "tdb_append: cannot allocate %llu bytes!\n",
				 (long long)key.dsize + old_dlen + dbuf.dsize);
			goto fail;
		}
		if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
				       newdata, old_dlen) != 0) {
			free(newdata);
			goto fail;
		}
		memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
		new_dbuf.dptr = newdata;
		new_dbuf.dsize = old_dlen + dbuf.dsize;
	} else {
		/* No existing record: store dbuf as-is. */
		newdata = NULL;
		new_dbuf = dbuf;
	}

	/* If they're using tdb_append(), it implies they're growing record. */
	ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room, true);
	unlock_lists(tdb, start, num, F_WRLCK);
	free(newdata);

	if (unlikely(ret == 1)) {
		/* Expand, then try again. */
		if (tdb_expand(tdb, key.dsize, dbuf.dsize, true) == -1)
			return -1;
		return tdb_append(tdb, key, dbuf);
	}

	/* FIXME: by simple simulation, this approximated 60% full.
	 * Check in real case! */
	if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
		enlarge_hash(tdb);

	return ret;

fail:
	unlock_lists(tdb, start, num, F_WRLCK);
	return -1;
}
 
-static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
+struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
 {
-       tdb_off_t i, hoff, len, num;
+       tdb_off_t off, start, num, bucket;
+       struct tdb_used_record rec;
+       uint64_t h;
+       struct tdb_data ret;
 
-       /* Look for next space. */
-       i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
-       len = (1ULL << tdb->header.v.hash_bits) - i;
-       num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
+       h = tdb_hash(tdb, key.dptr, key.dsize);
+       off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
+               return tdb_null;
 
-       if (unlikely(num == len)) {
-               /* We wrapped.  Look through start of hash table. */
-               hoff = hash_off(tdb, 0);
-               len = (1ULL << tdb->header.v.hash_bits);
-               num = tdb_find_zero_off(tdb, hoff, len);
-               if (i == len) {
-                       tdb->ecode = TDB_ERR_CORRUPT;
-                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-                                "hash_add: full hash table!\n");
-                       return -1;
-               }
+       if (!off) {
+               tdb->ecode = TDB_ERR_NOEXIST;
+               ret = tdb_null;
+       } else {
+               ret.dsize = rec_data_length(&rec);
+               ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
+                                         ret.dsize);
        }
-       /* FIXME: Encode extra hash bits! */
-       return tdb_write_off(tdb, hash_off(tdb, i + num), off);
+
+       unlock_lists(tdb, start, num, F_RDLCK);
+       return ret;
 }
 
 int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
 {
-       tdb_off_t i, old_bucket, off, start, num_locks = 1;
+       tdb_off_t i, bucket, off, start, num;
        struct tdb_used_record rec;
        uint64_t h;
 
        h = tdb_hash(tdb, key.dptr, key.dsize);
-
-       /* FIXME: can we avoid locks for some fast paths? */
        start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-       if (start == TDB_OFF_ERR)
+       if (unlikely(start == TDB_OFF_ERR))
                return -1;
 
-       /* Fast path. */
-       old_bucket = start;
-       off = entry_matches(tdb, start, h, &key, &rec);
-       if (off && off != TDB_OFF_ERR) {
-               /* We can only really fastpath delete if next bucket
-                * is 0.  Note that we haven't locked it, but our lock
-                * on this bucket stops anyone overflowing into it
-                * while we look. */
-               if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
-                       goto delete;
-               /* Slow path. */
-               off = TDB_OFF_ERR;
-       }
-
-       if (unlikely(off == TDB_OFF_ERR)) {
-               /* Slow path, need to grab more locks and search. */
-               tdb_off_t i;
-
-               /* Warning: this may drop the lock!  Does that on error. */
-               num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-               if (num_locks == TDB_OFF_ERR)
-                       return -1;
-
-               for (i = start; i < start + num_locks; i++) {
-                       off = entry_matches(tdb, i, h, &key, &rec);
-                       /* Empty entry or we found it? */
-                       if (off == 0 || off != TDB_OFF_ERR) {
-                               old_bucket = i;
-                               break;
-                       }
-               }
-               if (i == start + num_locks)
-                       off = 0;
-       }
+       /* FIXME: Fastpath: if next is zero, we can delete without lock,
+        * since this lock protects us. */
+       off = find_and_lock_slow(tdb, key, h, F_WRLCK,
+                                &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
+               return -1;
 
        if (!off) {
-               unlock_lists(tdb, start, num_locks, F_WRLCK);
+               /* FIXME: We could optimize not found case if it mattered, by
+                * reading offset after first lock: if it's zero, goto here. */
+               unlock_lists(tdb, start, num, F_WRLCK);
                tdb->ecode = TDB_ERR_NOEXIST;
                return -1;
        }
+       /* Since we found the entry, we must have locked it and a zero. */
+       assert(num >= 2);
 
-delete:
        /* This actually unlinks it. */
-       if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
+       if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
                goto unlock_err;
 
        /* Rehash anything following. */
-       for (i = hash_off(tdb, old_bucket+1);
-            i != hash_off(tdb, h + num_locks);
-            i += sizeof(tdb_off_t)) {
-               tdb_off_t off2;
+       for (i = bucket+1; i != bucket + num - 1; i++) {
+               tdb_off_t hoff, off2;
                uint64_t h2;
 
-               off2 = tdb_read_off(tdb, i);
+               hoff = hash_off(tdb, i);
+               off2 = tdb_read_off(tdb, hoff);
                if (unlikely(off2 == TDB_OFF_ERR))
                        goto unlock_err;
 
+               /* This can happen if we raced. */
+               if (unlikely(off2 == 0))
+                       break;
+
                /* Maybe use a bit to indicate it is in ideal place? */
                h2 = hash_record(tdb, off2);
                /* Is it happy where it is? */
-               if (hash_off(tdb, h2) == i)
+               if (hash_off(tdb, h2) == hoff)
                        continue;
 
                /* Remove it. */
-               if (tdb_write_off(tdb, i, 0) == -1)
+               if (tdb_write_off(tdb, hoff, 0) == -1)
                        goto unlock_err;
 
                /* Rehash it. */
@@ -911,11 +1019,11 @@ delete:
                            + rec_extra_padding(&rec)) != 0)
                goto unlock_err;
 
-       unlock_lists(tdb, start, num_locks, F_WRLCK);
+       unlock_lists(tdb, start, num, F_WRLCK);
        return 0;
 
 unlock_err:
-       unlock_lists(tdb, start, num_locks, F_WRLCK);
+       unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
 }