ccan/tdb2/hash.c
/*
   Trivial Database 2: hash handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/hash/hash.h>

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *arg)
{
        uint64_t ret;
        /* hash64_stable assumes lower bits are more important; they are a
         * slightly better hash.  We use the upper bits first, so swap them. */
        ret = hash64_stable((const unsigned char *)key, length, seed);
        return (ret >> 32) | (ret << 32);
}

void tdb_hash_init(struct tdb_context *tdb)
{
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}

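/* Hash the key of a record already in the file: read the record header to
 * find the key length, then hash the key bytes in place. */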
uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
        const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;

        r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (!r)
                /* FIXME */
                return 0;

        klen = rec_key_length(r);
        tdb_access_release(tdb, r);

        key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (!key)
                return 0;

        hash = tdb_hash(tdb, key, klen);
        tdb_access_release(tdb, key);
        return hash;
}

/* Get bits from a value. */
static uint32_t bits(uint64_t val, unsigned start, unsigned num)
{
        assert(num <= 32);
        /* Build the mask in 64 bits so it is well-defined even for num == 32. */
        return (val >> start) & (((uint64_t)1 << num) - 1);
}

/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
        h->hash_used += num;
        return bits(h->h, 64 - h->hash_used, num);
}
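
/* A lookup eats the hash from the top down: first the top-level group bits
 * (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS of them), then
 * TDB_HASH_GROUP_BITS for the home bucket, then TDB_SUBLEVEL_HASH_BITS more
 * (group, then bucket) for every subhash level it descends into. */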

/* Does entry match? */
static bool match(struct tdb_context *tdb,
                  struct hash_info *h,
                  const struct tdb_data *key,
                  tdb_off_t val,
                  struct tdb_used_record *rec)
{
        bool ret;
        const unsigned char *rkey;
        tdb_off_t off;

        /* FIXME: Handle hash value truncated. */
        if (bits(val, TDB_OFF_HASH_TRUNCATED_BIT, 1))
                abort();

        /* Desired bucket must match. */
        if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK))
                return false;

        /* Top bits of offset == next bits of hash. */
        if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
            != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                    TDB_OFF_UPPER_STEAL_EXTRA))
                return false;

        off = val & TDB_OFF_MASK;
        if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
                return false;

        /* FIXME: check extra bits in header? */
        if (rec_key_length(rec) != key->dsize)
                return false;

        rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
        if (!rkey)
                return false;
        ret = (memcmp(rkey, key->dptr, key->dsize) == 0);
        tdb_access_release(tdb, rkey);
        return ret;
}

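/* File offset of a bucket's entry within the group starting at group_start. */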
static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
        return group_start
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}

/* Truncated hashes can't be all 1: that's how we spot a sub-hash */
bool is_subhash(tdb_off_t val)
{
        return val >> (64-TDB_OFF_UPPER_STEAL) == (1<<TDB_OFF_UPPER_STEAL) - 1;
}

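/* Each top-level group owns one contiguous slice of the 64-bit hash space,
 * so taking a lock over that slice covers every record which could hash into
 * the group, however deep its subhash levels go. */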
/* FIXME: Guess the depth, don't over-lock! */
static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
{
        *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
        return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
}

/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, hinfo is filled in (and the optional tinfo).
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
                        struct tdb_data key,
                        int ltype,
                        struct hash_info *h,
                        struct tdb_used_record *rec,
                        struct traverse_info *tinfo)
{
        uint32_t i, group;
        tdb_off_t hashtable;

        h->h = tdb_hash(tdb, key.dptr, key.dsize);
        h->hash_used = 0;
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

        h->hlock_start = hlock_range(group, &h->hlock_range);
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;

        hashtable = offsetof(struct tdb_header, hashtable);
        if (tinfo) {
                tinfo->toplevel_group = group;
                tinfo->num_levels = 1;
                tinfo->levels[0].entry = 0;
                tinfo->levels[0].hashtable = hashtable
                        + (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
                tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
        }

        while (likely(h->hash_used < 64)) {
                /* Read in the hash group. */
                h->group_start = hashtable
                        + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

                if (tdb_read_convert(tdb, h->group_start, &h->group,
                                     sizeof(h->group)) == -1)
                        goto fail;

                /* Pointer to another hash table?  Go down... */
                if (is_subhash(h->group[h->home_bucket])) {
                        hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                                + sizeof(struct tdb_used_record);
                        if (tinfo) {
                                /* When we come back, use *next* bucket */
                                tinfo->levels[tinfo->num_levels-1].entry
                                        += h->home_bucket + 1;
                        }
                        group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
                                         - TDB_HASH_GROUP_BITS);
                        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                        if (tinfo) {
                                tinfo->levels[tinfo->num_levels].hashtable
                                        = hashtable;
                                tinfo->levels[tinfo->num_levels].total_buckets
                                        = 1 << TDB_SUBLEVEL_HASH_BITS;
                                tinfo->levels[tinfo->num_levels].entry
                                        = group << TDB_HASH_GROUP_BITS;
                                tinfo->num_levels++;
                        }
                        continue;
                }

                /* It's in this group: search (until 0 or all searched) */
                for (i = 0, h->found_bucket = h->home_bucket;
                     i < (1 << TDB_HASH_GROUP_BITS);
                     i++, h->found_bucket = ((h->found_bucket+1)
                                             % (1 << TDB_HASH_GROUP_BITS))) {
                        if (is_subhash(h->group[h->found_bucket]))
                                continue;

                        if (!h->group[h->found_bucket])
                                break;

                        if (match(tdb, h, &key, h->group[h->found_bucket],
                                  rec)) {
                                if (tinfo) {
                                        tinfo->levels[tinfo->num_levels-1].entry
                                                += h->found_bucket;
                                }
                                return h->group[h->found_bucket] & TDB_OFF_MASK;
                        }
                }
                /* Didn't find it: h indicates where it would go. */
                return 0;
        }

        /* FIXME: We hit the bottom.  Chain! */
        abort();

fail:
        tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
        return TDB_OFF_ERR;
}
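
/* A typical caller elsewhere in tdb2 (e.g. the store path) uses this roughly
 * as follows (a sketch only; real callers also handle allocation failure,
 * expansion and statistics):
 *
 *	off = find_and_lock(tdb, key, F_WRLCK, &h, &rec, NULL);
 *	if (off == TDB_OFF_ERR)
 *		return -1;
 *	... allocate and write the new record at new_off ...
 *	if (off)
 *		ret = replace_in_hash(tdb, &h, new_off);
 *	else
 *		ret = add_to_hash(tdb, &h, new_off);
 *	tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range, F_WRLCK);
 */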

/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the most-populated bucket,
 * 3) Expanding the bucket we wanted to place the new entry into.
 *
 * I measured the worst/average/best density during this process.
 * 1) 3%/16%/30%
 * 2) 4%/20%/38%
 * 3) 6%/22%/41%
 *
 * So we figure out the busiest bucket for the moment.
 */
static unsigned fullest_bucket(struct tdb_context *tdb,
                               const tdb_off_t *group,
                               unsigned new_bucket)
{
        unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
        unsigned int i, best_bucket;

        /* Count the new entry. */
        counts[new_bucket]++;
        best_bucket = new_bucket;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                if (is_subhash(group[i]))
                        continue;
                this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (++counts[this_bucket] > counts[best_bucket])
                        best_bucket = this_bucket;
        }

        return best_bucket;
}

static bool put_into_group(tdb_off_t *group,
                           unsigned bucket, tdb_off_t encoded)
{
        unsigned int i;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

                if (group[b] == 0) {
                        group[b] = encoded;
                        return true;
                }
        }
        return false;
}

static void force_into_group(tdb_off_t *group,
                             unsigned bucket, tdb_off_t encoded)
{
        if (!put_into_group(group, bucket, encoded))
                abort();
}

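/* A hash entry packs three things into one tdb_off_t: the home bucket in the
 * low bits, the record offset in the middle, and the next few hash bits
 * (TDB_OFF_UPPER_STEAL_EXTRA of them) at the top, so match() can often reject
 * a wrong entry without reading the record at all. */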
static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
        return h->home_bucket
                | new_off
                | ((uint64_t)bits(h->h,
                                  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                                  TDB_OFF_UPPER_STEAL_EXTRA)
                   << TDB_OFF_HASH_EXTRA_BIT);
}

/* Simply overwrite the hash entry we found before. */
int replace_in_hash(struct tdb_context *tdb,
                    struct hash_info *h,
                    tdb_off_t new_off)
{
        return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
                             encode_offset(new_off, h));
}

/* Add into a newly created subhash. */
static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
                          unsigned hash_used, tdb_off_t val)
{
        tdb_off_t off = (val & TDB_OFF_MASK), *group;
        struct hash_info h;
        unsigned int gnum;

        h.hash_used = hash_used;

        /* FIXME chain if hash_used == 64 */
        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                abort();

        /* FIXME: Do truncated hash bits if we can! */
        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
        h.group_start = subhash + sizeof(struct tdb_used_record)
                + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
        h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

        group = tdb_access_write(tdb, h.group_start,
                                 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
        if (!group)
                return -1;
        force_into_group(group, h.home_bucket, encode_offset(off, &h));
        return tdb_access_commit(tdb, group);
}

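/* The group is full: hang a new, empty subhash off its fullest bucket, then
 * re-home any entries which were aimed at that bucket (into the subhash) or
 * had been bumped out of their home bucket (back into the group). */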
static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned bucket, num_vals, i;
        tdb_off_t subhash;
        tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];

        /* Attach new empty subhash under fullest bucket. */
        bucket = fullest_bucket(tdb, h->group, h->home_bucket);

        subhash = alloc(tdb, 0, sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS,
                        0, false);
        if (subhash == TDB_OFF_ERR)
                return -1;

        add_stat(tdb, alloc_subhash, 1);
        if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
                return -1;

        /* Remove any which are destined for bucket or are in wrong place. */
        num_vals = 0;
        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (!h->group[i] || is_subhash(h->group[i]))
                        continue;
                if (home_bucket == bucket || home_bucket != i) {
                        vals[num_vals++] = h->group[i];
                        h->group[i] = 0;
                }
        }
        /* FIXME: This assert is valid, but we do this during unit test :( */
        /* assert(num_vals); */

        /* Overwrite expanded bucket with subhash pointer. */
        h->group[bucket] = subhash | ~((1ULL << (64 - TDB_OFF_UPPER_STEAL))-1);

        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
                unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

                if (this_bucket == bucket) {
                        if (add_to_subhash(tdb, subhash, h->hash_used, vals[i]))
                                return -1;
                } else {
                        /* There should be room to put this back. */
                        force_into_group(h->group, this_bucket, vals[i]);
                }
        }
        return 0;
}

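/* Delete the entry find_and_lock() located: clear its bucket, then re-insert
 * any following entries which had been pushed past their home bucket, so the
 * linear probe in find_and_lock() can still reach them. */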
int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned int i, num_movers = 0;
        tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];

        h->group[h->found_bucket] = 0;
        for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
                /* Empty bucket?  We're done. */
                if (!h->group[this_bucket])
                        break;

                /* Ignore subhashes. */
                if (is_subhash(h->group[this_bucket]))
                        continue;

                /* If this one is not happy where it is, we'll move it. */
                if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
                    != this_bucket) {
                        movers[num_movers++] = h->group[this_bucket];
                        h->group[this_bucket] = 0;
                }
        }

        /* Put back the ones we erased. */
        for (i = 0; i < num_movers; i++) {
                force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
                                 movers[i]);
        }

        /* Now we write back the hash group */
        return tdb_write_convert(tdb, h->group_start,
                                 h->group, sizeof(h->group));
}

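/* Insert a new entry where find_and_lock() stopped: use the empty bucket it
 * found if there was one; otherwise expand the group with a subhash, descend
 * into it if that is now where we belong, and recurse in the rare case that
 * the group we land in is full again. */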
int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
{
        /* FIXME: chain! */
        if (h->hash_used >= 64)
                abort();

        /* We hit an empty bucket during search?  That's where it goes. */
        if (!h->group[h->found_bucket]) {
                h->group[h->found_bucket] = encode_offset(new_off, h);
                /* Write back the modified group. */
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));
        }

        /* We're full.  Expand. */
        if (expand_group(tdb, h) == -1)
                return -1;

        if (is_subhash(h->group[h->home_bucket])) {
                /* We were expanded! */
                tdb_off_t hashtable;
                unsigned int gnum;

                /* Write back the modified group. */
                if (tdb_write_convert(tdb, h->group_start, h->group,
                                      sizeof(h->group)))
                        return -1;

                /* Move hashinfo down a level. */
                hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                        + sizeof(struct tdb_used_record);
                gnum = use_bits(h, TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
                h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                h->group_start = hashtable
                        + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                if (tdb_read_convert(tdb, h->group_start, &h->group,
                                     sizeof(h->group)) == -1)
                        return -1;
        }

        /* Expanding the group must have made room if it didn't choose this
         * bucket. */
        if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h)))
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));

        /* This can happen if all the hashes in the group (and ours) dropped
         * into the same group in the subhash. */
        return add_to_hash(tdb, h, new_off);
}

/* Traverse support: returns offset of record, or 0 or TDB_OFF_ERR. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
                              struct traverse_info *tinfo)
{
        tdb_off_t off, val;
        unsigned int i;
        struct traverse_level *tlevel;

        tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
        for (i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      tlevel->entry, tlevel->total_buckets);
             i != tlevel->total_buckets;
             i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      i+1, tlevel->total_buckets)) {
                val = tdb_read_off(tdb, tlevel->hashtable+sizeof(tdb_off_t)*i);
                if (unlikely(val == TDB_OFF_ERR))
                        return TDB_OFF_ERR;

                off = val & TDB_OFF_MASK;

                /* This makes the delete-all-in-traverse case work
                 * (and simplifies our logic a little). */
                if (off == tinfo->prev)
                        continue;

                tlevel->entry = i;

                if (!is_subhash(val)) {
                        /* Found one. */
                        tinfo->prev = off;
                        return off;
                }

                /* When we come back, we want the next one */
                tlevel->entry++;
                tinfo->num_levels++;
                tlevel++;
                tlevel->hashtable = off + sizeof(struct tdb_used_record);
                tlevel->entry = 0;
                tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
                goto again;
        }

        /* Nothing there? */
        if (tinfo->num_levels == 1)
                return 0;

        /* Go back up and keep searching. */
        tinfo->num_levels--;
        tlevel--;
        goto again;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int next_in_hash(struct tdb_context *tdb, int ltype,
                 struct traverse_info *tinfo,
                 TDB_DATA *kbuf, size_t *dlen)
{
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
        tdb_off_t hlock_start, hlock_range, off;

        while (tinfo->toplevel_group < (1 << group_bits)) {
                hlock_start = (tdb_off_t)tinfo->toplevel_group
                        << (64 - group_bits);
                hlock_range = 1ULL << group_bits;
                if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype,
                                    TDB_LOCK_WAIT) != 0)
                        return -1;

                off = iterate_hash(tdb, tinfo);
                if (off) {
                        struct tdb_used_record rec;

                        if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
                                tdb_unlock_hashes(tdb,
                                                  hlock_start, hlock_range,
                                                  ltype);
                                return -1;
                        }
                        if (rec_magic(&rec) != TDB_MAGIC) {
                                tdb->log(tdb, TDB_DEBUG_FATAL, tdb->log_priv,
                                         "next_in_hash:"
                                         " corrupt record at %llu\n",
                                         (long long)off);
                                /* Don't leak the hash lock on this error path. */
                                tdb_unlock_hashes(tdb,
                                                  hlock_start, hlock_range,
                                                  ltype);
                                return -1;
                        }

                        kbuf->dsize = rec_key_length(&rec);

                        /* They want data as well? */
                        if (dlen) {
                                *dlen = rec_data_length(&rec);
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize
                                                            + *dlen);
                        } else {
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize);
                        }
                        tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
                        return kbuf->dptr ? 1 : -1;
                }

                tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);

                tinfo->toplevel_group++;
                tinfo->levels[0].hashtable
                        += (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                tinfo->levels[0].entry = 0;
        }
        return 0;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int first_in_hash(struct tdb_context *tdb, int ltype,
                  struct traverse_info *tinfo,
                  TDB_DATA *kbuf, size_t *dlen)
{
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
        tinfo->num_levels = 1;
        tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
        tinfo->levels[0].entry = 0;
        tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
}
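
/* A traversal caller (e.g. tdb_traverse()) drives these two roughly as
 * follows (a sketch only; real callers also free key.dptr each time round
 * and handle errors):
 *
 *	ret = first_in_hash(tdb, ltype, &tinfo, &key, &dlen);
 *	while (ret == 1) {
 *		... visit key (the data follows the key bytes when dlen is given) ...
 *		ret = next_in_hash(tdb, ltype, &tinfo, &key, &dlen);
 *	}
 */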

/* Even if the entry isn't in this hash bucket, you'd have to lock this
 * bucket to find it. */
static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
                     int ltype, enum tdb_lock_flags waitflag,
                     const char *func)
{
        int ret;
        uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
        tdb_trace_1rec(tdb, func, *key);
        return ret;
}

/* lock/unlock one hash chain. This is meant to be used to reduce
   contention - it cannot guarantee how many records will be locked */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
}

int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
        return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
}