tdb2: fix valgrind warnings.
diff --git a/ccan/tdb2/tdb.c b/ccan/tdb2/tdb.c
index 61791c9d5b5ca8af9ef22a1f88e07ce8f8bbafc7..7544d7a1e13ef1bcea9d13ece2a4a8e01be7a3f0 100644
--- a/ccan/tdb2/tdb.c
+++ b/ccan/tdb2/tdb.c
@@ -147,7 +147,7 @@ static int tdb_new_database(struct tdb_context *tdb)
                                         sizeof(newdb.hdr.hash_test),
                                         newdb.hdr.hash_seed,
                                         tdb->hash_priv);
-
+       memset(newdb.hdr.reserved, 0, sizeof(newdb.hdr.reserved));
        newdb.hdr.v.generation = 0;
 
        /* The initial zone must cover the initial database size! */
@@ -236,12 +236,24 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
        tdb_io_init(tdb);
        tdb_lock_init(tdb);
 
-       /* FIXME */
-       if (attr) {
-               tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
-                        "tdb_open: attributes not yet supported\n");
-               errno = EINVAL;
-               goto fail;
+       while (attr) {
+               switch (attr->base.attr) {
+               case TDB_ATTRIBUTE_LOG:
+                       tdb->log = attr->log.log_fn;
+                       tdb->log_priv = attr->log.log_private;
+                       break;
+               case TDB_ATTRIBUTE_HASH:
+                       tdb->khash = attr->hash.hash_fn;
+                       tdb->hash_priv = attr->hash.hash_private;
+                       break;
+               default:
+                       tdb->log(tdb, TDB_DEBUG_ERROR, tdb->log_priv,
+                                "tdb_open: unknown attribute type %u\n",
+                                attr->base.attr);
+                       errno = EINVAL;
+                       goto fail;
+               }
+               attr = attr->base.next;
        }
 
        if ((open_flags & O_ACCMODE) == O_WRONLY) {
@@ -268,6 +280,8 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
                }
                TEST_IT(tdb->flags & TDB_CONVERT);
                tdb_convert(tdb, &tdb->header, sizeof(tdb->header));
+               /* Zones don't matter for internal db. */
+               tdb->last_zone = 0;
                return tdb;
        }
 
@@ -380,7 +394,7 @@ struct tdb_context *tdb_open(const char *name, int tdb_flags,
        return NULL;
 }
 
-static tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
+tdb_off_t hash_off(struct tdb_context *tdb, uint64_t list)
 {
        return tdb->header.v.hash_off
                + ((list & ((1ULL << tdb->header.v.hash_bits) - 1))
@@ -400,6 +414,8 @@ static tdb_off_t entry_matches(struct tdb_context *tdb,
        uint64_t keylen;
        const unsigned char *rkey;
 
+       list &= ((1ULL << tdb->header.v.hash_bits) - 1);
+
        off = tdb_read_off(tdb, tdb->header.v.hash_off
                           + list * sizeof(tdb_off_t));
        if (off == 0 || off == TDB_OFF_ERR)
@@ -425,7 +441,7 @@ static tdb_off_t entry_matches(struct tdb_context *tdb,
        if (keylen != key->dsize)
                return TDB_OFF_ERR;
 
-       rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen);
+       rkey = tdb_access_read(tdb, off + sizeof(*rec), keylen, false);
        if (!rkey)
                return TDB_OFF_ERR;
        if (memcmp(rkey, key->dptr, keylen) != 0)
@@ -435,7 +451,7 @@ static tdb_off_t entry_matches(struct tdb_context *tdb,
 }
 
 /* FIXME: Optimize? */
-static void unlock_range(struct tdb_context *tdb,
+static void unlock_lists(struct tdb_context *tdb,
                         tdb_off_t list, tdb_len_t num,
                         int ltype)
 {
@@ -446,15 +462,16 @@ static void unlock_range(struct tdb_context *tdb,
 }
 
 /* FIXME: Optimize? */
-static int lock_range(struct tdb_context *tdb,
+static int lock_lists(struct tdb_context *tdb,
                      tdb_off_t list, tdb_len_t num,
                      int ltype)
 {
        tdb_off_t i;
 
        for (i = list; i < list + num; i++) {
-               if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT) != 0) {
-                       unlock_range(tdb, list, i - list, ltype);
+               if (tdb_lock_list(tdb, i, ltype, TDB_LOCK_WAIT)
+                   == TDB_OFF_ERR) {
+                       unlock_lists(tdb, list, i - list, ltype);
                        return -1;
                }
        }
@@ -467,7 +484,7 @@ static int lock_range(struct tdb_context *tdb,
 static tdb_len_t relock_hash_to_zero(struct tdb_context *tdb,
                                     tdb_off_t start, int ltype)
 {
-       tdb_len_t num, len, pre_locks;
+       tdb_len_t num, len;
 
 again:
        num = 1ULL << tdb->header.v.hash_bits;
@@ -475,39 +492,45 @@ again:
        if (unlikely(len == num - start)) {
                /* We hit the end of the hash range.  Drop lock: we have
                   to lock start of hash first. */
+               tdb_len_t pre_locks;
+
                tdb_unlock_list(tdb, start, ltype);
+
                /* Grab something, so header is stable. */
                if (tdb_lock_list(tdb, 0, ltype, TDB_LOCK_WAIT))
                        return TDB_OFF_ERR;
-               len = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
-               if (lock_range(tdb, 1, len, ltype) == -1) {
+               pre_locks = tdb_find_zero_off(tdb, hash_off(tdb, 0), num);
+               /* We want to lock the zero entry as well. */
+               pre_locks++;
+               if (lock_lists(tdb, 1, pre_locks - 1, ltype) == -1) {
                        tdb_unlock_list(tdb, 0, ltype);
                        return TDB_OFF_ERR;
                }
-               pre_locks = len;
-               len = num - start;
+
+               /* Now lock later ones. */
+               if (unlikely(lock_lists(tdb, start, len, ltype) == -1)) {
+                       unlock_lists(tdb, 0, pre_locks, ltype);
+                       return TDB_OFF_ERR;
+               }
+               len += pre_locks;
        } else {
-               /* We already have lock on start. */
-               start++;
-               pre_locks = 0;
-       }
-       if (unlikely(lock_range(tdb, start, len, ltype) == -1)) {
-               if (pre_locks)
-                       unlock_range(tdb, 0, pre_locks, ltype);
-               else
+               /* We want to lock the zero entry as well. */
+               len++;
+               /* But we already have lock on start. */
+               if (unlikely(lock_lists(tdb, start+1, len-1, ltype) == -1)) {
                        tdb_unlock_list(tdb, start, ltype);
-               return TDB_OFF_ERR;
+                       return TDB_OFF_ERR;
+               }
        }
 
        /* Now, did we lose the race, and it's not zero any more? */
-       if (unlikely(tdb_read_off(tdb, hash_off(tdb, pre_locks + len)) != 0)) {
-               unlock_range(tdb, 0, pre_locks, ltype);
+       if (unlikely(tdb_read_off(tdb, hash_off(tdb, start + len - 1)) != 0)) {
                /* Leave the start locked, as expected. */
-               unlock_range(tdb, start + 1, len - 1, ltype);
+               unlock_lists(tdb, start + 1, len - 1, ltype);
                goto again;
        }
 
-       return pre_locks + len;
+       return len;
 }
 
 /* FIXME: modify, don't rewrite! */
@@ -518,20 +541,56 @@ static int update_rec_hdr(struct tdb_context *tdb,
                          struct tdb_used_record *rec,
                          uint64_t h)
 {
-       uint64_t room = rec_data_length(rec) + rec_extra_padding(rec);
+       uint64_t dataroom = rec_data_length(rec) + rec_extra_padding(rec);
 
-       if (set_header(tdb, rec, keylen, datalen, room - datalen, h))
+       if (set_header(tdb, rec, keylen, datalen, keylen + dataroom, h))
                return -1;
 
        return tdb_write_convert(tdb, off, rec, sizeof(*rec));
 }
 
+static int hash_add(struct tdb_context *tdb,
+                   uint64_t hash, tdb_off_t off)
+{
+       tdb_off_t i, hoff, len, num;
+
+       /* Look for next space. */
+       i = (hash & ((1ULL << tdb->header.v.hash_bits) - 1));
+       len = (1ULL << tdb->header.v.hash_bits) - i;
+       num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
+
+       if (unlikely(num == len)) {
+               /* We wrapped.  Look through start of hash table. */
+               i = 0;
+               hoff = hash_off(tdb, 0);
+               len = (1ULL << tdb->header.v.hash_bits);
+               num = tdb_find_zero_off(tdb, hoff, len);
+               if (num == len) {
+                       tdb->ecode = TDB_ERR_CORRUPT;
+                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                "hash_add: full hash table!\n");
+                       return -1;
+               }
+       }
+       if (tdb_read_off(tdb, hash_off(tdb, i + num)) != 0) {
+               tdb->ecode = TDB_ERR_CORRUPT;
+               tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                        "hash_add: overwriting hash table?\n");
+               return -1;
+       }
+
+       /* FIXME: Encode extra hash bits! */
+       return tdb_write_off(tdb, hash_off(tdb, i + num), off);
+}
+
 /* If we fail, others will try after us. */
 static void enlarge_hash(struct tdb_context *tdb)
 {
-       tdb_off_t newoff, i;
-       uint64_t h, num = 1ULL << tdb->header.v.hash_bits;
+       tdb_off_t newoff, oldoff, i;
+       tdb_len_t hlen;
+       uint64_t num = 1ULL << tdb->header.v.hash_bits;
        struct tdb_used_record pad, *r;
+       unsigned int records = 0;
 
        /* FIXME: We should do this without holding locks throughout. */
        if (tdb_allrecord_lock(tdb, F_WRLCK, TDB_LOCK_WAIT, false) == -1)
@@ -541,98 +600,190 @@ static void enlarge_hash(struct tdb_context *tdb)
        if ((1ULL << tdb->header.v.hash_bits) != num)
                goto unlock;
 
-       newoff = alloc(tdb, 0, num * 2, 0, false);
+again:
+       /* Allocate our new array. */
+       hlen = num * sizeof(tdb_off_t) * 2;
+       newoff = alloc(tdb, 0, hlen, 0, false);
        if (unlikely(newoff == TDB_OFF_ERR))
                goto unlock;
        if (unlikely(newoff == 0)) {
-               if (tdb_expand(tdb, 0, num * 2, false) == -1)
-                       goto unlock;
-               newoff = alloc(tdb, 0, num * 2, 0, false);
-               if (newoff == TDB_OFF_ERR || newoff == 0)
+               if (tdb_expand(tdb, 0, hlen, false) == -1)
                        goto unlock;
+               goto again;
        }
+       /* Step over record header! */
+       newoff += sizeof(struct tdb_used_record);
+
+       /* Starts all zero. */
+       if (zero_out(tdb, newoff, hlen) == -1)
+               goto unlock;
+
+       /* Update header now so we can use normal routines. */
+       oldoff = tdb->header.v.hash_off;
+
+       tdb->header.v.hash_bits++;
+       tdb->header.v.hash_off = newoff;
 
        /* FIXME: If the space before is empty, we know this is in its ideal
-        * location.  We can steal a bit from the pointer to avoid rehash. */
-       for (i = tdb_find_nonzero_off(tdb, tdb->header.v.hash_off, num);
-            i < num;
-            i += tdb_find_nonzero_off(tdb, tdb->header.v.hash_off
-                                      + i*sizeof(tdb_off_t), num - i)) {
+        * location.  Or steal a bit from the pointer to avoid rehash. */
+       for (i = 0; i < num; i++) {
                tdb_off_t off;
-               off = tdb_read_off(tdb, tdb->header.v.hash_off
-                                  + i*sizeof(tdb_off_t));
+               off = tdb_read_off(tdb, oldoff + i * sizeof(tdb_off_t));
                if (unlikely(off == TDB_OFF_ERR))
-                       goto unlock;
-               if (unlikely(!off)) {
-                       tdb->ecode = TDB_ERR_CORRUPT;
-                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-                                "find_bucket_and_lock: zero hash bucket!\n");
-                       goto unlock;
-               }
-               h = hash_record(tdb, off);
-               /* FIXME: Encode extra hash bits! */
-               if (tdb_write_off(tdb, newoff
-                                 + (h & ((num * 2) - 1)) * sizeof(uint64_t),
-                                 off) == -1)
-                       goto unlock;
+                       goto oldheader;
+               if (off && hash_add(tdb, hash_record(tdb, off), off) == -1)
+                       goto oldheader;
+               if (off)
+                       records++;
        }
 
+       tdb->log(tdb, TDB_DEBUG_TRACE, tdb->log_priv,
+                "enlarge_hash: moved %u records from %llu buckets.\n",
+                records, (long long)num);
+
        /* Free up old hash. */
-       r = tdb_get(tdb, tdb->header.v.hash_off, &pad, sizeof(*r));
+       r = tdb_get(tdb, oldoff - sizeof(*r), &pad, sizeof(*r));
        if (!r)
-               goto unlock;
-       add_free_record(tdb, tdb->header.v.hash_off,
-                       rec_data_length(r) + rec_extra_padding(r));
+               goto oldheader;
+       add_free_record(tdb, oldoff - sizeof(*r),
+                       sizeof(*r)+rec_data_length(r)+rec_extra_padding(r));
 
        /* Now we write the modified header. */
-       tdb->header.v.generation++;
-       tdb->header.v.hash_bits++;
-       tdb->header.v.hash_off = newoff;
-       tdb_write_convert(tdb, offsetof(struct tdb_header, v),
-                         &tdb->header.v, sizeof(tdb->header.v));
+       write_header(tdb);
 unlock:
        tdb_allrecord_unlock(tdb, F_WRLCK);
+       return;
+
+oldheader:
+       tdb->header.v.hash_bits--;
+       tdb->header.v.hash_off = oldoff;
+       goto unlock;
+}
+
+
+/* This is the slow version of the routine which searches the
+ * hashtable for an entry.
+ * We lock every hash bucket up to and including the next zero one.
+ */
+static tdb_off_t find_and_lock_slow(struct tdb_context *tdb,
+                                   struct tdb_data key,
+                                   uint64_t h,
+                                   int ltype,
+                                   tdb_off_t *start_lock,
+                                   tdb_len_t *num_locks,
+                                   tdb_off_t *bucket,
+                                   struct tdb_used_record *rec)
+{
+       /* Warning: this may drop the lock on *bucket! */
+       *num_locks = relock_hash_to_zero(tdb, *start_lock, ltype);
+       if (*num_locks == TDB_OFF_ERR)
+               return TDB_OFF_ERR;
+
+       for (*bucket = *start_lock;
+            *bucket < *start_lock + *num_locks;
+            (*bucket)++) {
+               tdb_off_t off = entry_matches(tdb, *bucket, h, &key, rec);
+               /* Empty entry or we found it? */
+               if (off == 0 || off != TDB_OFF_ERR)
+                       return off;
+       }
+
+       /* We didn't find a zero entry?  Something went badly wrong... */
+       unlock_lists(tdb, *start_lock, *start_lock + *num_locks, ltype);
+       tdb->ecode = TDB_ERR_CORRUPT;
+       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                "find_and_lock: expected to find an empty hash bucket!\n");
+       return TDB_OFF_ERR;
+}
+
+/* This is the core routine which searches the hashtable for an entry.
+ * On error, no locks are held and TDB_OFF_ERR is returned.
+ * Otherwise, *num_locks locks of type ltype from *start_lock are held.
+ * The bucket where the entry is (or would be) is in *bucket.
+ * If not found, the return value is 0.
+ * If found, the return value is the offset, and *rec is the record. */
+static tdb_off_t find_and_lock(struct tdb_context *tdb,
+                              struct tdb_data key,
+                              uint64_t h,
+                              int ltype,
+                              tdb_off_t *start_lock,
+                              tdb_len_t *num_locks,
+                              tdb_off_t *bucket,
+                              struct tdb_used_record *rec)
+{
+       tdb_off_t off;
+
+       /* FIXME: can we avoid locks for some fast paths? */
+       *start_lock = tdb_lock_list(tdb, h, ltype, TDB_LOCK_WAIT);
+       if (*start_lock == TDB_OFF_ERR)
+               return TDB_OFF_ERR;
+
+       /* Fast path. */
+       off = entry_matches(tdb, *start_lock, h, &key, rec);
+       if (likely(off != TDB_OFF_ERR)) {
+               *bucket = *start_lock;
+               *num_locks = 1;
+               return off;
+       }
+
+       /* Slow path, need to grab more locks and search. */
+       return find_and_lock_slow(tdb, key, h, ltype, start_lock, num_locks,
+                                 bucket, rec);
+}
+
+/* Returns -1 on error, 0 on OK, 1 on "expand and retry." */
+static int replace_data(struct tdb_context *tdb,
+                       uint64_t h, struct tdb_data key, struct tdb_data dbuf,
+                       tdb_off_t bucket,
+                       tdb_off_t old_off, tdb_len_t old_room,
+                       bool growing)
+{
+       tdb_off_t new_off;
+
+       /* Allocate a new record. */
+       new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
+       if (unlikely(new_off == TDB_OFF_ERR))
+               return -1;
+
+       if (unlikely(new_off == 0))
+               return 1;
+
+       /* We didn't like the existing one: remove it. */
+       if (old_off)
+               add_free_record(tdb, old_off,
+                               sizeof(struct tdb_used_record)
+                               + key.dsize + old_room);
+
+       /* FIXME: Encode extra hash bits! */
+       if (tdb_write_off(tdb, hash_off(tdb, bucket), new_off) == -1)
+               return -1;
+
+       new_off += sizeof(struct tdb_used_record);
+       if (tdb->methods->write(tdb, new_off, key.dptr, key.dsize) == -1)
+               return -1;
+
+       new_off += key.dsize;
+       if (tdb->methods->write(tdb, new_off, dbuf.dptr, dbuf.dsize) == -1)
+               return -1;
+
+       /* FIXME: tdb_increment_seqnum(tdb); */
+       return 0;
 }
 
 int tdb_store(struct tdb_context *tdb,
              struct tdb_data key, struct tdb_data dbuf, int flag)
 {
-       tdb_off_t new_off, off, old_bucket, start, num_locks = 1;
+       tdb_off_t off, bucket, start, num;
+       tdb_len_t old_room = 0;
        struct tdb_used_record rec;
        uint64_t h;
-       bool growing = false;
+       int ret;
 
        h = tdb_hash(tdb, key.dptr, key.dsize);
-
-       /* FIXME: can we avoid locks for some fast paths? */
-       start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-       if (start == TDB_OFF_ERR)
+       off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
                return -1;
 
-       /* Fast path. */
-       old_bucket = start;
-       off = entry_matches(tdb, start, h, &key, &rec);
-       if (unlikely(off == TDB_OFF_ERR)) {
-               /* Slow path, need to grab more locks and search. */
-               tdb_off_t i;
-
-               /* Warning: this may drop the lock!  Does that on error. */
-               num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-               if (num_locks == TDB_OFF_ERR)
-                       return -1;
-
-               for (i = start; i < start + num_locks; i++) {
-                       off = entry_matches(tdb, i, h, &key, &rec);
-                       /* Empty entry or we found it? */
-                       if (off == 0 || off != TDB_OFF_ERR) {
-                               old_bucket = i;
-                               break;
-                       }
-               }
-               if (i == start + num_locks)
-                       off = 0;
-       }
-
        /* Now we have lock on this hash bucket. */
        if (flag == TDB_INSERT) {
                if (off) {
@@ -641,18 +792,22 @@ int tdb_store(struct tdb_context *tdb,
                }
        } else {
                if (off) {
-                       if (rec_data_length(&rec) + rec_extra_padding(&rec)
-                           >= dbuf.dsize) {
-                               new_off = off;
+                       old_room = rec_data_length(&rec)
+                               + rec_extra_padding(&rec);
+                       if (old_room >= dbuf.dsize) {
+                               /* Can modify in-place.  Easy! */
                                if (update_rec_hdr(tdb, off,
                                                   key.dsize, dbuf.dsize,
                                                   &rec, h))
                                        goto fail;
-                               goto write;
+                               if (tdb->methods->write(tdb, off + sizeof(rec)
+                                                       + key.dsize,
+                                                       dbuf.dptr, dbuf.dsize))
+                                       goto fail;
+                               unlock_lists(tdb, start, num, F_WRLCK);
+                               return 0;
                        }
                        /* FIXME: See if right record is free? */
-                       /* Hint to allocator that we've realloced. */
-                       growing = true;
                } else {
                        if (flag == TDB_MODIFY) {
                                /* if the record doesn't exist and we
@@ -664,202 +819,191 @@ int tdb_store(struct tdb_context *tdb,
                }
        }
 
-       /* Allocate a new record. */
-       new_off = alloc(tdb, key.dsize, dbuf.dsize, h, growing);
-       if (new_off == 0) {
-               unlock_range(tdb, start, num_locks, F_WRLCK);
+       /* If we didn't use the old record, this implies we're growing. */
+       ret = replace_data(tdb, h, key, dbuf, bucket, off, old_room, off != 0);
+       unlock_lists(tdb, start, num, F_WRLCK);
+
+       if (unlikely(ret == 1)) {
                /* Expand, then try again... */
-               if (tdb_expand(tdb, key.dsize, dbuf.dsize, growing) == -1)
+               if (tdb_expand(tdb, key.dsize, dbuf.dsize, off != 0) == -1)
                        return -1;
                return tdb_store(tdb, key, dbuf, flag);
        }
 
-       /* We didn't like the existing one: remove it. */
-       if (off) {
-               add_free_record(tdb, off, sizeof(struct tdb_used_record)
-                               + rec_key_length(&rec)
-                               + rec_data_length(&rec)
-                               + rec_extra_padding(&rec));
-       }
-
-write:
-       /* FIXME: Encode extra hash bits! */
-       if (tdb_write_off(tdb, hash_off(tdb, old_bucket), new_off) == -1)
-               goto fail;
-
-       off = new_off + sizeof(struct tdb_used_record);
-       if (tdb->methods->write(tdb, off, key.dptr, key.dsize) == -1)
-               goto fail;
-       off += key.dsize;
-       if (tdb->methods->write(tdb, off, dbuf.dptr, dbuf.dsize) == -1)
-               goto fail;
-
-       /* FIXME: tdb_increment_seqnum(tdb); */
-       unlock_range(tdb, start, num_locks, F_WRLCK);
-
        /* FIXME: by simple simulation, this approximated 60% full.
         * Check in real case! */
-       if (unlikely(num_locks > 4 * tdb->header.v.hash_bits - 31))
+       if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
                enlarge_hash(tdb);
 
-       return 0;
+       return ret;
 
 fail:
-       unlock_range(tdb, start, num_locks, F_WRLCK);
+       unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
 }
 
-struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
+int tdb_append(struct tdb_context *tdb,
+              struct tdb_data key, struct tdb_data dbuf)
 {
-       tdb_off_t off, start, num_locks = 1;
+       tdb_off_t off, bucket, start, num;
        struct tdb_used_record rec;
+       tdb_len_t old_room = 0, old_dlen;
        uint64_t h;
-       struct tdb_data ret;
+       unsigned char *newdata;
+       struct tdb_data new_dbuf;
+       int ret;
 
        h = tdb_hash(tdb, key.dptr, key.dsize);
+       off = find_and_lock(tdb, key, h, F_WRLCK, &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
+               return -1;
 
-       /* FIXME: can we avoid locks for some fast paths? */
-       start = tdb_lock_list(tdb, h, F_RDLCK, TDB_LOCK_WAIT);
-       if (start == TDB_OFF_ERR)
-               return tdb_null;
+       if (off) {
+               old_dlen = rec_data_length(&rec);
+               old_room = old_dlen + rec_extra_padding(&rec);
 
-       /* Fast path. */
-       off = entry_matches(tdb, start, h, &key, &rec);
-       if (unlikely(off == TDB_OFF_ERR)) {
-               /* Slow path, need to grab more locks and search. */
-               tdb_off_t i;
-
-               /* Warning: this may drop the lock!  Does that on error. */
-               num_locks = relock_hash_to_zero(tdb, start, F_RDLCK);
-               if (num_locks == TDB_OFF_ERR)
-                       return tdb_null;
-
-               for (i = start; i < start + num_locks; i++) {
-                       off = entry_matches(tdb, i, h, &key, &rec);
-                       /* Empty entry or we found it? */
-                       if (off == 0 || off != TDB_OFF_ERR)
-                               break;
+               /* Fast path: can append in place. */
+               if (rec_extra_padding(&rec) >= dbuf.dsize) {
+                       if (update_rec_hdr(tdb, off, key.dsize,
+                                          old_dlen + dbuf.dsize, &rec, h))
+                               goto fail;
+
+                       off += sizeof(rec) + key.dsize + old_dlen;
+                       if (tdb->methods->write(tdb, off, dbuf.dptr,
+                                               dbuf.dsize) == -1)
+                               goto fail;
+
+                       /* FIXME: tdb_increment_seqnum(tdb); */
+                       unlock_lists(tdb, start, num, F_WRLCK);
+                       return 0;
+               }
+               /* FIXME: Check right record free? */
+
+               /* Slow path. */
+               newdata = malloc(key.dsize + old_dlen + dbuf.dsize);
+               if (!newdata) {
+                       tdb->ecode = TDB_ERR_OOM;
+                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
+                                "tdb_append: cannot allocate %llu bytes!\n",
+                                (long long)key.dsize + old_dlen + dbuf.dsize);
+                       goto fail;
                }
-               if (i == start + num_locks)
-                       off = 0;
+               if (tdb->methods->read(tdb, off + sizeof(rec) + key.dsize,
+                                      newdata, old_dlen) != 0) {
+                       free(newdata);
+                       goto fail;
+               }
+               memcpy(newdata + old_dlen, dbuf.dptr, dbuf.dsize);
+               new_dbuf.dptr = newdata;
+               new_dbuf.dsize = old_dlen + dbuf.dsize;
+       } else {
+               newdata = NULL;
+               new_dbuf = dbuf;
        }
 
-       if (!off) {
-               unlock_range(tdb, start, num_locks, F_RDLCK);
-               tdb->ecode = TDB_ERR_NOEXIST;
-               return tdb_null;
+       /* If they're using tdb_append(), it implies they're growing record. */
+       ret = replace_data(tdb, h, key, new_dbuf, bucket, off, old_room, true);
+       unlock_lists(tdb, start, num, F_WRLCK);
+       free(newdata);
+
+       if (unlikely(ret == 1)) {
+               /* Expand, then try again. */
+               if (tdb_expand(tdb, key.dsize, dbuf.dsize, true) == -1)
+                       return -1;
+               return tdb_append(tdb, key, dbuf);
        }
 
-       ret.dsize = rec_data_length(&rec);
-       ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
-                                 ret.dsize);
-       unlock_range(tdb, start, num_locks, F_RDLCK);
+       /* FIXME: by simple simulation, this approximated 60% full.
+        * Check in real case! */
+       if (unlikely(num > 4 * tdb->header.v.hash_bits - 30))
+               enlarge_hash(tdb);
+
        return ret;
+
+fail:
+       unlock_lists(tdb, start, num, F_WRLCK);
+       return -1;
 }
 
-static int hash_add(struct tdb_context *tdb, uint64_t h, tdb_off_t off)
+struct tdb_data tdb_fetch(struct tdb_context *tdb, struct tdb_data key)
 {
-       tdb_off_t i, hoff, len, num;
+       tdb_off_t off, start, num, bucket;
+       struct tdb_used_record rec;
+       uint64_t h;
+       struct tdb_data ret;
 
-       /* Look for next space. */
-       i = (h & ((1ULL << tdb->header.v.hash_bits) - 1));
-       len = (1ULL << tdb->header.v.hash_bits) - i;
-       num = tdb_find_zero_off(tdb, hash_off(tdb, i), len);
+       h = tdb_hash(tdb, key.dptr, key.dsize);
+       off = find_and_lock(tdb, key, h, F_RDLCK, &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
+               return tdb_null;
 
-       if (unlikely(num == len)) {
-               /* We wrapped.  Look through start of hash table. */
-               hoff = hash_off(tdb, 0);
-               len = (1ULL << tdb->header.v.hash_bits);
-               num = tdb_find_zero_off(tdb, hoff, len);
-               if (i == len) {
-                       tdb->ecode = TDB_ERR_CORRUPT;
-                       tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
-                                "hash_add: full hash table!\n");
-                       return -1;
-               }
+       if (!off) {
+               tdb->ecode = TDB_ERR_NOEXIST;
+               ret = tdb_null;
+       } else {
+               ret.dsize = rec_data_length(&rec);
+               ret.dptr = tdb_alloc_read(tdb, off + sizeof(rec) + key.dsize,
+                                         ret.dsize);
        }
-       /* FIXME: Encode extra hash bits! */
-       return tdb_write_off(tdb, hash_off(tdb, i + num), off);
+
+       unlock_lists(tdb, start, num, F_RDLCK);
+       return ret;
 }
 
 int tdb_delete(struct tdb_context *tdb, struct tdb_data key)
 {
-       tdb_off_t i, old_bucket, off, start, num_locks = 1;
+       tdb_off_t i, bucket, off, start, num;
        struct tdb_used_record rec;
        uint64_t h;
 
        h = tdb_hash(tdb, key.dptr, key.dsize);
-
-       /* FIXME: can we avoid locks for some fast paths? */
        start = tdb_lock_list(tdb, h, F_WRLCK, TDB_LOCK_WAIT);
-       if (start == TDB_OFF_ERR)
+       if (unlikely(start == TDB_OFF_ERR))
                return -1;
 
-       /* Fast path. */
-       old_bucket = start;
-       off = entry_matches(tdb, start, h, &key, &rec);
-       if (off && off != TDB_OFF_ERR) {
-               /* We can only really fastpath delete if next bucket
-                * is 0.  Note that we haven't locked it, but our lock
-                * on this bucket stops anyone overflowing into it
-                * while we look. */
-               if (tdb_read_off(tdb, hash_off(tdb, h+1)) == 0)
-                       goto delete;
-               /* Slow path. */
-               off = TDB_OFF_ERR;
-       }
-
-       if (unlikely(off == TDB_OFF_ERR)) {
-               /* Slow path, need to grab more locks and search. */
-               tdb_off_t i;
-
-               /* Warning: this may drop the lock!  Does that on error. */
-               num_locks = relock_hash_to_zero(tdb, start, F_WRLCK);
-               if (num_locks == TDB_OFF_ERR)
-                       return -1;
-
-               for (i = start; i < start + num_locks; i++) {
-                       off = entry_matches(tdb, i, h, &key, &rec);
-                       /* Empty entry or we found it? */
-                       if (off == 0 || off != TDB_OFF_ERR) {
-                               old_bucket = i;
-                               break;
-                       }
-               }
-               if (i == start + num_locks)
-                       off = 0;
-       }
+       /* FIXME: Fastpath: if next is zero, we can delete without lock,
+        * since this lock protects us. */
+       off = find_and_lock_slow(tdb, key, h, F_WRLCK,
+                                &start, &num, &bucket, &rec);
+       if (unlikely(off == TDB_OFF_ERR))
+               return -1;
 
        if (!off) {
-               unlock_range(tdb, start, num_locks, F_WRLCK);
+               /* FIXME: We could optimize not found case if it mattered, by
+                * reading offset after first lock: if it's zero, goto here. */
+               unlock_lists(tdb, start, num, F_WRLCK);
                tdb->ecode = TDB_ERR_NOEXIST;
                return -1;
        }
+       /* Since we found the entry, we must have locked it and a zero. */
+       assert(num >= 2);
 
-delete:
        /* This actually unlinks it. */
-       if (tdb_write_off(tdb, hash_off(tdb, old_bucket), 0) == -1)
+       if (tdb_write_off(tdb, hash_off(tdb, bucket), 0) == -1)
                goto unlock_err;
 
        /* Rehash anything following. */
-       for (i = hash_off(tdb, old_bucket+1);
-            i != hash_off(tdb, h + num_locks);
-            i += sizeof(tdb_off_t)) {
-               tdb_off_t off2;
+       for (i = bucket+1; i != bucket + num - 1; i++) {
+               tdb_off_t hoff, off2;
                uint64_t h2;
 
-               off2 = tdb_read_off(tdb, i);
+               hoff = hash_off(tdb, i);
+               off2 = tdb_read_off(tdb, hoff);
                if (unlikely(off2 == TDB_OFF_ERR))
                        goto unlock_err;
 
+               /* This can happen if we raced. */
+               if (unlikely(off2 == 0))
+                       break;
+
                /* Maybe use a bit to indicate it is in ideal place? */
                h2 = hash_record(tdb, off2);
                /* Is it happy where it is? */
-               if (hash_off(tdb, h2) == i)
+               if (hash_off(tdb, h2) == hoff)
                        continue;
 
                /* Remove it. */
-               if (tdb_write_off(tdb, i, 0) == -1)
+               if (tdb_write_off(tdb, hoff, 0) == -1)
                        goto unlock_err;
 
                /* Rehash it. */
@@ -875,11 +1019,11 @@ delete:
                            + rec_extra_padding(&rec)) != 0)
                goto unlock_err;
 
-       unlock_range(tdb, start, num_locks, F_WRLCK);
+       unlock_lists(tdb, start, num, F_WRLCK);
        return 0;
 
 unlock_err:
-       unlock_range(tdb, start, num_locks, F_WRLCK);
+       unlock_lists(tdb, start, num, F_WRLCK);
        return -1;
 }
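
For context, a minimal sketch of how the code paths this patch touches (attribute handling in tdb_open, plus the store/append/fetch/delete routines rewritten around find_and_lock and replace_data) are driven from the public API. tdb_store, tdb_append, tdb_fetch, tdb_delete, TDB_INSERT and struct tdb_data appear in the diff itself; the header path ccan/tdb2/tdb2.h, the full five-argument tdb_open() signature, TDB_DEFAULT and tdb_close() are assumed from the tdb2 public header and are not shown in this hunk.

/*
 * Illustrative only -- not part of this commit.
 * Assumptions: ccan/tdb2/tdb2.h, tdb_open(name, tdb_flags, open_flags,
 * mode, attributes), TDB_DEFAULT and tdb_close() come from the public
 * tdb2 API; only the first two tdb_open parameters are visible above.
 */
#include <ccan/tdb2/tdb2.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct tdb_context *tdb;
	struct tdb_data key, val, out;

	/* No attributes: exercises the default log/hash setup in tdb_open. */
	tdb = tdb_open("example.tdb2", TDB_DEFAULT, O_RDWR|O_CREAT, 0600, NULL);
	if (!tdb)
		return 1;

	key.dptr = (unsigned char *)"hello";
	key.dsize = strlen("hello");
	val.dptr = (unsigned char *)"world";
	val.dsize = strlen("world");

	/* Insert a fresh record, then grow it through the append path. */
	if (tdb_store(tdb, key, val, TDB_INSERT) != 0)
		return 1;
	if (tdb_append(tdb, key, val) != 0)
		return 1;

	/* Fetch returns malloc'd data; expect "worldworld" here. */
	out = tdb_fetch(tdb, key);
	if (out.dptr) {
		printf("%.*s\n", (int)out.dsize, out.dptr);
		free(out.dptr);
	}

	tdb_delete(tdb, key);
	tdb_close(tdb);
	return 0;
}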