tdb2: simplify logging levels, rename TDB_DEBUG_* to TDB_LOG_*
index 0fd4774939d0399b9aa1706a3bf4d01b53c69bbd..17601c0c8bf3fc2bf3ed2496735c3dc725e8e7fd 100644
@@ -1,7 +1,7 @@
- /* 
+ /*
    Trivial Database 2: hash handling
    Copyright (C) Rusty Russell 2010
-   
+
    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
@@ -42,17 +42,19 @@ uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
 
 uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 {
-       struct tdb_used_record pad, *r;
+       const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;
 
-       r = tdb_get(tdb, off, &pad, sizeof(pad));
+       r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (!r)
                /* FIXME */
                return 0;
 
        klen = rec_key_length(r);
-       key = tdb_access_read(tdb, off + sizeof(pad), klen, false);
+       tdb_access_release(tdb, r);
+
+       key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (!key)
                return 0;
 
@@ -62,7 +64,7 @@ uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
 }
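
The change to hash_record() above replaces tdb_get(), which copied the record
header into a stack "pad", with the tdb_access_read()/tdb_access_release()
pair.  The pointer that pairing hands back may point straight into the mmap or
at a temporary copy, so every successful read needs a matching release before
the next access.  A sketch of the discipline as a hypothetical helper
(illustration only; it mirrors hash_record() and uses the same tdb2-internal
calls):

    /* key_length_of() is a made-up name, not part of tdb2. */
    static uint64_t key_length_of(struct tdb_context *tdb, tdb_off_t off)
    {
            const struct tdb_used_record *r;
            uint64_t klen;

            r = tdb_access_read(tdb, off, sizeof(*r), true);
            if (!r)
                    return 0;           /* read failure */
            klen = rec_key_length(r);   /* use the mapping... */
            tdb_access_release(tdb, r); /* ...and release it promptly */
            return klen;
    }

Releasing the header before mapping the key is what lets hash_record() keep at
most one access outstanding at a time.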
 
 /* Get bits from a value. */
-static uint32_t bits(uint64_t val, unsigned start, unsigned num)
+static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
 {
        assert(num <= 32);
        return (val >> start) & ((1U << num) - 1);
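
The rename from bits() to bits_from() is purely cosmetic: the function still
extracts num bits of val starting at bit position start, counted from the
least significant end.  A worked example with illustrative values:

    /* 0xABCD >> 4 == 0xABC; 0xABC & 0xFF == 0xBC. */
    assert(bits_from(0xABCD, 4, 8) == 0xBC);

(The assert admits num == 32, where 1U << num is undefined; in practice every
caller passes far fewer bits.)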
@@ -73,7 +75,31 @@ static uint32_t bits(uint64_t val, unsigned start, unsigned num)
 static uint32_t use_bits(struct hash_info *h, unsigned num)
 {
        h->hash_used += num;
-       return bits(h->h, 64 - h->hash_used, num);
+       return bits_from(h->h, 64 - h->hash_used, num);
+}
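
use_bits() doles the 64-bit hash out from the top downward, recording how much
has been consumed in h->hash_used so that each level of the hash tree sees
fresh bits.  A sketch with made-up bit counts (the real counts come from
TDB_TOPLEVEL_HASH_BITS and friends; other hash_info members are left zeroed):

    struct hash_info h = { .h = 0x0123456789ABCDEFULL, .hash_used = 0 };
    uint32_t group  = use_bits(&h, 10);  /* bits 63..54 of h.h */
    uint32_t bucket = use_bits(&h, 2);   /* bits 53..52 */
    /* h.hash_used == 12; the next caller continues at bit 51. */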
+
+static bool key_matches(struct tdb_context *tdb,
+                       const struct tdb_used_record *rec,
+                       tdb_off_t off,
+                       const struct tdb_data *key)
+{
+       bool ret = false;
+       const char *rkey;
+
+       if (rec_key_length(rec) != key->dsize) {
+               add_stat(tdb, compare_wrong_keylen, 1);
+               return ret;
+       }
+
+       rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
+       if (!rkey)
+               return ret;
+       if (memcmp(rkey, key->dptr, key->dsize) == 0)
+               ret = true;
+       else
+               add_stat(tdb, compare_wrong_keycmp, 1);
+       tdb_access_release(tdb, rkey);
+       return ret;
 }
 
 /* Does entry match? */
@@ -83,38 +109,33 @@ static bool match(struct tdb_context *tdb,
                  tdb_off_t val,
                  struct tdb_used_record *rec)
 {
-       bool ret;
-       const unsigned char *rkey;
        tdb_off_t off;
 
-       /* FIXME: Handle hash value truncated. */
-       if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
-               abort();
-
+       add_stat(tdb, compares, 1);
        /* Desired bucket must match. */
-       if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
+       if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
+               add_stat(tdb, compare_wrong_bucket, 1);
                return false;
+       }
 
        /* Top bits of offset == next bits of hash. */
-       if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
-           != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
-                   TDB_OFF_UPPER_STEAL_EXTRA))
+       if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
+           != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
+                   TDB_OFF_UPPER_STEAL_EXTRA)) {
+               add_stat(tdb, compare_wrong_offsetbits, 1);
                return false;
+       }
 
        off = val & TDB_OFF_MASK;
        if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
                return false;
 
-       /* FIXME: check extra bits in header? */
-       if (rec_key_length(rec) != key->dsize)
+       if ((h->h & ((1 << 11)-1)) != rec_hash(rec)) {
+               add_stat(tdb, compare_wrong_rechash, 1);
                return false;
+       }
 
-       rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
-       if (!rkey)
-               return false;
-       ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
-       tdb_access_release(tdb, rkey);
-       return ret;
+       return key_matches(tdb, rec, off, key);
 }
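
match() now rejects a candidate in increasing order of cost: the group bucket
bits, then the stolen top-of-offset hash bits, then the 11-bit hash kept in
the record header, and only then the full key comparison in key_matches().
The fields being peeled apart are the ones encode_offset() (further down)
packs into each bucket entry; a sketch of the decode, reusing the TDB_OFF_*
constants this file already uses (record offsets are aligned, which is what
frees the low bits for the home bucket):

    tdb_off_t off    = val & TDB_OFF_MASK;            /* record offset */
    unsigned  bucket = val & TDB_OFF_HASH_GROUP_MASK; /* home bucket   */
    uint32_t  extra  = bits_from(val, TDB_OFF_HASH_EXTRA_BIT,
                                 TDB_OFF_UPPER_STEAL_EXTRA); /* hash bits */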
 
 static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
@@ -123,10 +144,75 @@ static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
 }
 
-/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
 bool is_subhash(tdb_off_t val)
 {
-       return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
+       return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
+}
+
+/* FIXME: Guess the depth, don't over-lock! */
+static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
+{
+       *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
+       return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
+}
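
hlock_range() gives every top-level group an equal, contiguous slice of the
notional 2^64 hash-lock space, so locking one slice covers anything that could
possibly hash into that group.  Worked through with an illustrative ten group
bits (i.e. 1024 groups, hence a 54-bit shift):

    tdb_off_t size;
    tdb_off_t start = hlock_range(3, &size);
    /* start == 3ULL << 54 == 0x00C0000000000000, size == 1ULL << 54:
     * group 3 owns hashes up to 0x00FFFFFFFFFFFFFF. */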
+
+static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
+                                   struct tdb_data key,
+                                   tdb_off_t chain,
+                                   struct hash_info *h,
+                                   struct tdb_used_record *rec,
+                                   struct traverse_info *tinfo)
+{
+       tdb_off_t off, next;
+
+       /* In case nothing is free, we set these to zero. */
+       h->home_bucket = h->found_bucket = 0;
+
+       for (off = chain; off; off = next) {
+               unsigned int i;
+
+               h->group_start = off;
+               if (tdb_read_convert(tdb, off, h->group, sizeof(h->group)))
+                       return TDB_OFF_ERR;
+
+               for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
+                       tdb_off_t recoff;
+                       if (!h->group[i]) {
+                               /* Remember this empty bucket. */
+                               h->home_bucket = h->found_bucket = i;
+                               continue;
+                       }
+
+                       /* We can insert extra bits via add_to_hash's
+                        * empty-bucket logic. */
+                       recoff = h->group[i] & TDB_OFF_MASK;
+                       if (tdb_read_convert(tdb, recoff, rec, sizeof(*rec)))
+                               return TDB_OFF_ERR;
+
+                       if (key_matches(tdb, rec, recoff, &key)) {
+                               h->home_bucket = h->found_bucket = i;
+
+                               if (tinfo) {
+                                       tinfo->levels[tinfo->num_levels]
+                                               .hashtable = off;
+                                       tinfo->levels[tinfo->num_levels]
+                                               .total_buckets
+                                               = 1 << TDB_HASH_GROUP_BITS;
+                                       tinfo->levels[tinfo->num_levels].entry
+                                               = i;
+                                       tinfo->num_levels++;
+                               }
+                               return recoff;
+                       }
+               }
+               next = tdb_read_off(tdb, off
+                                   + offsetof(struct tdb_chain, next));
+               if (next == TDB_OFF_ERR)
+                       return TDB_OFF_ERR;
+               if (next)
+                       next += sizeof(struct tdb_used_record);
+       }
+       return 0;
 }
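
On disk, the chain find_in_chain() walks is a linked list of bucket groups:
each block holds one group of entry offsets plus a next pointer, and since
next refers to the following block's allocation header, the walker adds
sizeof(struct tdb_used_record) to land on the payload.  The shape implied by
the code above (the real struct tdb_chain lives in tdb2's private header; the
field name here is a guess):

    struct tdb_chain {
            tdb_off_t rec[1 << TDB_HASH_GROUP_BITS]; /* one bucket group */
            tdb_off_t next;  /* 0, or offset of the next block's header */
    };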
 
 /* This is the core routine which searches the hashtable for an entry.
@@ -149,11 +235,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
 
-       /* FIXME: Guess the depth, don't over-lock! */
-       h->hlock_start = (tdb_off_t)group
-               << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
-       h->hlock_range = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS
-                                       - TDB_HASH_GROUP_BITS));
+       h->hlock_start = hlock_range(group, &h->hlock_range);
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;
@@ -163,12 +245,12 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
                tinfo->toplevel_group = group;
                tinfo->num_levels = 1;
                tinfo->levels[0].entry = 0;
-               tinfo->levels[0].hashtable = hashtable 
+               tinfo->levels[0].hashtable = hashtable
                        + (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
                tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
        }
 
-       while (likely(h->hash_used < 64)) {
+       while (h->hash_used <= 64) {
                /* Read in the hash group. */
                h->group_start = hashtable
                        + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
@@ -225,8 +307,7 @@ tdb_off_t find_and_lock(struct tdb_context *tdb,
                return 0;
        }
 
-       /* FIXME: We hit the bottom.  Chain! */
-       abort();
+       return find_in_chain(tdb, key, hashtable, h, rec, tinfo);
 
 fail:
        tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
@@ -236,8 +317,8 @@ fail:
 /* I wrote a simple test, expanding a hash to 2GB, for the following
  * cases:
  * 1) Expanding all the buckets at once,
- * 2) Expanding the most-populated bucket,
- * 3) Expanding the bucket we wanted to place the new entry ito.
+ * 2) Expanding the bucket we wanted to place the new entry into,
+ * 3) Expanding the most-populated bucket.
  *
  * I measured the worst/average/best density during this process.
  * 1) 3%/16%/30%
@@ -297,13 +378,13 @@ static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
 {
        return h->home_bucket
                | new_off
-               | ((uint64_t)bits(h->h,
+               | ((uint64_t)bits_from(h->h,
                                  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                                  TDB_OFF_UPPER_STEAL_EXTRA)
                   << TDB_OFF_HASH_EXTRA_BIT);
 }
 
-/* Simply overwrite the hash entry we found before. */ 
+/* Simply overwrite the hash entry we found before. */
 int replace_in_hash(struct tdb_context *tdb,
                    struct hash_info *h,
                    tdb_off_t new_off)
@@ -312,6 +393,41 @@ int replace_in_hash(struct tdb_context *tdb,
                             encode_offset(new_off, h));
 }
 
+/* We slot in anywhere that's empty in the chain. */
+static int COLD add_to_chain(struct tdb_context *tdb,
+                            tdb_off_t subhash,
+                            tdb_off_t new_off)
+{
+       size_t entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
+
+       if (entry == 1 << TDB_HASH_GROUP_BITS) {
+               tdb_off_t next;
+
+               next = tdb_read_off(tdb, subhash
+                                   + offsetof(struct tdb_chain, next));
+               if (next == TDB_OFF_ERR)
+                       return -1;
+
+               if (!next) {
+                       next = alloc(tdb, 0, sizeof(struct tdb_chain), 0,
+                                    TDB_CHAIN_MAGIC, false);
+                       if (next == TDB_OFF_ERR)
+                               return -1;
+                       if (zero_out(tdb, next+sizeof(struct tdb_used_record),
+                                    sizeof(struct tdb_chain)))
+                               return -1;
+                       if (tdb_write_off(tdb, subhash
+                                         + offsetof(struct tdb_chain, next),
+                                         next) != 0)
+                               return -1;
+               }
+               return add_to_chain(tdb, next, new_off);
+       }
+
+       return tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
+                            new_off);
+}
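
add_to_chain() leans on tdb_find_zero_off() returning either a free slot index
or the group size when the block is full; the latter is what drives hopping
to, or lazily allocating, the next block.  The three outcomes, compressed
(sketch; same internal helpers as above):

    size_t i = tdb_find_zero_off(tdb, subhash, 1 << TDB_HASH_GROUP_BITS);
    /* i <  1 << TDB_HASH_GROUP_BITS: write new_off into slot i, done.
     * i == group size, next != 0:    recurse into the existing block.
     * i == group size, next == 0:    alloc and zero a fresh tdb_chain,
     *                                link it, then recurse into it. */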
+
 /* Add into a newly created subhash. */
 static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
                          unsigned hash_used, tdb_off_t val)
@@ -322,14 +438,12 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
 
        h.hash_used = hash_used;
 
-       /* FIXME chain if hash_used == 64 */
        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
-               abort();
+               return add_to_chain(tdb, subhash, off);
 
-       /* FIXME: Do truncated hash bits if we can! */
        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
-       h.group_start = subhash + sizeof(struct tdb_used_record)
+       h.group_start = subhash
                + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
        h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);
 
@@ -343,20 +457,29 @@ static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
 
 static int expand_group(struct tdb_context *tdb, struct hash_info *h)
 {
-       unsigned bucket, num_vals, i;
+       unsigned bucket, num_vals, i, magic;
+       size_t subsize;
        tdb_off_t subhash;
        tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
 
        /* Attach new empty subhash under fullest bucket. */
        bucket = fullest_bucket(tdb, h->group, h->home_bucket);
 
-       subhash = alloc(tdb, 0, sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS,
-                       0, false);
+       if (h->hash_used == 64) {
+               add_stat(tdb, alloc_chain, 1);
+               subsize = sizeof(struct tdb_chain);
+               magic = TDB_CHAIN_MAGIC;
+       } else {
+               add_stat(tdb, alloc_subhash, 1);
+               subsize = (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS);
+               magic = TDB_HTABLE_MAGIC;
+       }
+
+       subhash = alloc(tdb, 0, subsize, 0, magic, false);
        if (subhash == TDB_OFF_ERR)
                return -1;
 
-       if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
-                    sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
+       if (zero_out(tdb, subhash + sizeof(struct tdb_used_record), subsize))
                return -1;
 
        /* Remove any which are destined for bucket or are in wrong place. */
@@ -374,7 +497,10 @@ static int expand_group(struct tdb_context *tdb, struct hash_info *h)
        /* assert(num_vals); */
 
        /* Overwrite expanded bucket with subhash pointer. */
-       h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);
+       h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);
+
+       /* Point to actual contents of record. */
+       subhash += sizeof(struct tdb_used_record);
 
        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
@@ -430,10 +556,6 @@ int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
 
 int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
 {
-       /* FIXME: chain! */
-       if (h->hash_used >= 64)
-               abort();
-
        /* We hit an empty bucket during search?  That's where it goes. */
        if (!h->group[h->found_bucket]) {
                h->group[h->found_bucket] = encode_offset(new_off, h);
@@ -442,6 +564,9 @@ int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
                                         h->group, sizeof(h->group));
        }
 
+       if (h->hash_used > 64)
+               return add_to_chain(tdb, h->group_start, new_off);
+
        /* We're full.  Expand. */
        if (expand_group(tdb, h) == -1)
                return -1;
@@ -520,7 +645,11 @@ again:
                tlevel++;
                tlevel->hashtable = off + sizeof(struct tdb_used_record);
                tlevel->entry = 0;
-               tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
+               /* Next level is a chain? */
+               if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1))
+                       tlevel->total_buckets = (1 << TDB_HASH_GROUP_BITS);
+               else
+                       tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
                goto again;
        }
 
@@ -528,6 +657,20 @@ again:
        if (tinfo->num_levels == 1)
                return 0;
 
+       /* Handle chained entries. */
+       if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1)) {
+               tlevel->hashtable = tdb_read_off(tdb, tlevel->hashtable
+                                                + offsetof(struct tdb_chain,
+                                                           next));
+               if (tlevel->hashtable == TDB_OFF_ERR)
+                       return TDB_OFF_ERR;
+               if (tlevel->hashtable) {
+                       tlevel->hashtable += sizeof(struct tdb_used_record);
+                       tlevel->entry = 0;
+                       goto again;
+               }
+       }
+
        /* Go back up and keep searching. */
        tinfo->num_levels--;
        tlevel--;
@@ -537,16 +680,16 @@ again:
 /* Return 1 if we find something, 0 if not, -1 on error. */
 int next_in_hash(struct tdb_context *tdb, int ltype,
                 struct traverse_info *tinfo,
-                TDB_DATA *kbuf, unsigned int *dlen)
+                TDB_DATA *kbuf, size_t *dlen)
 {
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
-       tdb_off_t hlock_start, hlock_range, off;
+       tdb_off_t hl_start, hl_range, off;
 
        while (tinfo->toplevel_group < (1 << group_bits)) {
-               hlock_start = (tdb_off_t)tinfo->toplevel_group
+               hl_start = (tdb_off_t)tinfo->toplevel_group
                        << (64 - group_bits);
-               hlock_range = 1ULL << group_bits;
-               if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype,
+               hl_range = 1ULL << group_bits;
+               if (tdb_lock_hashes(tdb, hl_start, hl_range, ltype,
                                    TDB_LOCK_WAIT) != 0)
                        return -1;
 
@@ -556,29 +699,37 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
 
                        if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
                                tdb_unlock_hashes(tdb,
-                                                 hlock_start, hlock_range,
-                                                 ltype);
+                                                 hl_start, hl_range, ltype);
                                return -1;
                        }
+                       if (rec_magic(&rec) != TDB_USED_MAGIC) {
+                               tdb_logerr(tdb, TDB_ERR_CORRUPT,
+                                          TDB_LOG_ERROR,
+                                          "next_in_hash:"
+                                          " corrupt record at %llu",
+                                          (long long)off);
+                               return -1;
+                       }
+
                        kbuf->dsize = rec_key_length(&rec);
 
                        /* They want data as well? */
                        if (dlen) {
                                *dlen = rec_data_length(&rec);
-                               kbuf->dptr = tdb_alloc_read(tdb, 
+                               kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize
                                                            + *dlen);
                        } else {
-                               kbuf->dptr = tdb_alloc_read(tdb, 
+                               kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize);
                        }
-                       tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
+                       tdb_unlock_hashes(tdb, hl_start, hl_range, ltype);
                        return kbuf->dptr ? 1 : -1;
                }
 
-               tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
+               tdb_unlock_hashes(tdb, hl_start, hl_range, ltype);
 
                tinfo->toplevel_group++;
                tinfo->levels[0].hashtable
@@ -591,7 +742,7 @@ int next_in_hash(struct tdb_context *tdb, int ltype,
 /* Return 1 if we find something, 0 if not, -1 on error. */
 int first_in_hash(struct tdb_context *tdb, int ltype,
                  struct traverse_info *tinfo,
-                 TDB_DATA *kbuf, unsigned int *dlen)
+                 TDB_DATA *kbuf, size_t *dlen)
 {
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
@@ -603,40 +754,45 @@ int first_in_hash(struct tdb_context *tdb, int ltype,
        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
 }
 
-TDB_DATA tdb_firstkey(struct tdb_context *tdb)
+/* Even if the entry isn't in this hash bucket, you'd have to lock this
+ * bucket to find it. */
+static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
+                    int ltype, enum tdb_lock_flags waitflag,
+                    const char *func)
 {
-       struct traverse_info tinfo;
-       struct tdb_data k;
-       switch (first_in_hash(tdb, F_RDLCK, &tinfo, &k, NULL)) {
-       case 1:
-               return k;
-       case 0:
-               tdb->ecode = TDB_SUCCESS;
-               /* Fall thru... */
-       default:
-               return tdb_null;
-       }
-}              
+       int ret;
+       uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits_from(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
 
-/* We lock twice, not very efficient.  We could keep last key & tinfo cached. */
-TDB_DATA tdb_nextkey(struct tdb_context *tdb, TDB_DATA key)
+       ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
+       tdb_trace_1rec(tdb, func, *key);
+       return ret;
+}
+
+/* Lock/unlock one hash chain.  This is meant to be used to reduce
+   contention; it cannot guarantee how many records will be locked. */
+int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
 {
-       struct traverse_info tinfo;
-       struct hash_info h;
-       struct tdb_used_record rec;
-
-       tinfo.prev = find_and_lock(tdb, key, F_RDLCK, &h, &rec, &tinfo);
-       if (unlikely(tinfo.prev == TDB_OFF_ERR))
-               return tdb_null;
-       tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_RDLCK);
-
-       switch (next_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL)) {
-       case 1:
-               return key;
-       case 0:
-               tdb->ecode = TDB_SUCCESS;
-               /* Fall thru... */
-       default:
-               return tdb_null;
-       }
-}              
+       return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
+}
+
+int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
+{
+       uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
+       tdb_off_t lockstart, locksize;
+       unsigned int group, gbits;
+
+       gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
+       group = bits_from(h, 64 - gbits, gbits);
+
+       lockstart = hlock_range(group, &locksize);
+
+       tdb_trace_1rec(tdb, "tdb_chainunlock", key);
+       return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
+}