/*
   Trivial Database 2: hash handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "private.h"
#include <assert.h>
#include <ccan/hash/hash.h>

static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
                             void *arg)
{
        uint64_t ret;
        /* hash64_stable assumes lower bits are more important; they are a
         * slightly better hash.  We use the upper bits first, so swap them. */
        ret = hash64_stable((const unsigned char *)key, length, seed);
        return (ret >> 32) | (ret << 32);
}
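
/*
 * Illustration of the swap: if hash64_stable() returned
 * 0x0123456789abcdef, we return 0x89abcdef01234567, so the better-mixed
 * low 32 bits become the top bits, which use_bits() consumes first.
 */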

void tdb_hash_init(struct tdb_context *tdb)
{
        tdb->khash = jenkins_hash;
        tdb->hash_priv = NULL;
}

uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
        return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}

uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
        const struct tdb_used_record *r;
        const void *key;
        uint64_t klen, hash;

        r = tdb_access_read(tdb, off, sizeof(*r), true);
        if (!r)
                /* FIXME */
                return 0;

        klen = rec_key_length(r);
        tdb_access_release(tdb, r);

        key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
        if (!key)
                return 0;

        hash = tdb_hash(tdb, key, klen);
        tdb_access_release(tdb, key);
        return hash;
}

/* Get bits from a value. */
static uint32_t bits(uint64_t val, unsigned start, unsigned num)
{
        assert(num <= 32);
        /* Widen before shifting: 1U << 32 would be undefined for num == 32. */
        return (val >> start) & (uint32_t)((1ULL << num) - 1);
}
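
/*
 * For example, bits(0xABCD000000000000ULL, 56, 8) == 0xAB: shift the
 * field down to bit 0, then mask off 'num' bits.
 */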

/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
        h->hash_used += num;
        return bits(h->h, 64 - h->hash_used, num);
}
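
/*
 * Successive use_bits() calls consume the hash from the top down: the
 * first use_bits(h, 4) on h->h == 0xF123... returns 0xF (bits 63-60)
 * and leaves h->hash_used == 4; the next call continues at bit 59.
 * Nothing is consumed twice, and after 64 bits the hash is exhausted
 * (see the chaining FIXMEs below).
 */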

/* Does entry match? */
static bool match(struct tdb_context *tdb,
                  struct hash_info *h,
                  const struct tdb_data *key,
                  tdb_off_t val,
                  struct tdb_used_record *rec)
{
        bool ret = false;
        const unsigned char *rkey;
        tdb_off_t off;

        add_stat(tdb, compares, 1);
        /* Desired bucket must match. */
        if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
                add_stat(tdb, compare_wrong_bucket, 1);
                return ret;
        }

        /* Top bits of offset == next bits of hash. */
        if (bits(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
            != bits(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                    TDB_OFF_UPPER_STEAL_EXTRA)) {
                add_stat(tdb, compare_wrong_offsetbits, 1);
                return ret;
        }

        off = val & TDB_OFF_MASK;
        if (tdb_read_convert(tdb, off, rec, sizeof(*rec)) == -1)
                return ret;

        if (rec_key_length(rec) != key->dsize) {
                add_stat(tdb, compare_wrong_keylen, 1);
                return ret;
        }

        /* The record header only stores the low 11 bits of the hash. */
        if ((h->h & ((1 << 11) - 1)) != rec_hash(rec)) {
                add_stat(tdb, compare_wrong_rechash, 1);
                return ret;
        }

        rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
        if (!rkey)
                return ret;
        if (memcmp(rkey, key->dptr, key->dsize) == 0)
                ret = true;
        else
                add_stat(tdb, compare_wrong_keycmp, 1);
        tdb_access_release(tdb, rkey);
        return ret;
}

static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
        return group_start
                + (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}

bool is_subhash(tdb_off_t val)
{
        return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
}

/* FIXME: Guess the depth, don't over-lock! */
static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
{
        *size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
        return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
}
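
/*
 * Illustration (hypothetical constants): if TDB_TOPLEVEL_HASH_BITS were
 * 10 and TDB_HASH_GROUP_BITS 2, there would be 2^8 groups and each
 * would own a 2^56-wide slice of hash space: group 0 locks [0, 2^56),
 * group 1 locks [2^56, 2^57), and so on.  Indexing groups by the *top*
 * hash bits is what makes each lock one contiguous range.
 */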

/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, h is filled in (and the optional tinfo).
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
                        struct tdb_data key,
                        int ltype,
                        struct hash_info *h,
                        struct tdb_used_record *rec,
                        struct traverse_info *tinfo)
{
        uint32_t i, group;
        tdb_off_t hashtable;

        h->h = tdb_hash(tdb, key.dptr, key.dsize);
        h->hash_used = 0;
        group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

        h->hlock_start = hlock_range(group, &h->hlock_range);
        if (tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
                            TDB_LOCK_WAIT))
                return TDB_OFF_ERR;

        hashtable = offsetof(struct tdb_header, hashtable);
        if (tinfo) {
                tinfo->toplevel_group = group;
                tinfo->num_levels = 1;
                tinfo->levels[0].entry = 0;
                tinfo->levels[0].hashtable = hashtable
                        + (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
                tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
        }

        while (likely(h->hash_used < 64)) {
                /* Read in the hash group. */
                h->group_start = hashtable
                        + group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

                if (tdb_read_convert(tdb, h->group_start, &h->group,
                                     sizeof(h->group)) == -1)
                        goto fail;

                /* Pointer to another hash table?  Go down... */
                if (is_subhash(h->group[h->home_bucket])) {
                        hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                                + sizeof(struct tdb_used_record);
                        if (tinfo) {
                                /* When we come back, use *next* bucket */
                                tinfo->levels[tinfo->num_levels-1].entry
                                        += h->home_bucket + 1;
                        }
                        group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
                                         - TDB_HASH_GROUP_BITS);
                        h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                        if (tinfo) {
                                tinfo->levels[tinfo->num_levels].hashtable
                                        = hashtable;
                                tinfo->levels[tinfo->num_levels].total_buckets
                                        = 1 << TDB_SUBLEVEL_HASH_BITS;
                                tinfo->levels[tinfo->num_levels].entry
                                        = group << TDB_HASH_GROUP_BITS;
                                tinfo->num_levels++;
                        }
                        continue;
                }

                /* It's in this group: search (until 0 or all searched) */
                for (i = 0, h->found_bucket = h->home_bucket;
                     i < (1 << TDB_HASH_GROUP_BITS);
                     i++, h->found_bucket = ((h->found_bucket+1)
                                             % (1 << TDB_HASH_GROUP_BITS))) {
                        if (is_subhash(h->group[h->found_bucket]))
                                continue;

                        if (!h->group[h->found_bucket])
                                break;

                        if (match(tdb, h, &key, h->group[h->found_bucket],
                                  rec)) {
                                if (tinfo) {
                                        tinfo->levels[tinfo->num_levels-1].entry
                                                += h->found_bucket;
                                }
                                return h->group[h->found_bucket] & TDB_OFF_MASK;
                        }
                }
                /* Didn't find it: h indicates where it would go. */
                return 0;
        }

        /* FIXME: We hit the bottom.  Chain! */
        abort();

fail:
        tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
        return TDB_OFF_ERR;
}

/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the bucket we wanted to place the new entry into,
 * 3) Expanding the most-populated bucket.
 *
 * I measured the worst/average/best density during this process:
 * 1) 3%/16%/30%
 * 2) 4%/20%/38%
 * 3) 6%/22%/41%
 *
 * So, for now, we expand the fullest bucket.
 */
static unsigned fullest_bucket(struct tdb_context *tdb,
                               const tdb_off_t *group,
                               unsigned new_bucket)
{
        unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
        unsigned int i, best_bucket;

        /* Count the new entry. */
        counts[new_bucket]++;
        best_bucket = new_bucket;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                if (is_subhash(group[i]))
                        continue;
                this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (++counts[this_bucket] > counts[best_bucket])
                        best_bucket = this_bucket;
        }

        return best_bucket;
}

static bool put_into_group(tdb_off_t *group,
                           unsigned bucket, tdb_off_t encoded)
{
        unsigned int i;

        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

                if (group[b] == 0) {
                        group[b] = encoded;
                        return true;
                }
        }
        return false;
}
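
/*
 * put_into_group() is plain linear probing within one group: start at
 * the preferred bucket, take the first empty slot, wrapping modulo the
 * group size; fail if all 1 << TDB_HASH_GROUP_BITS slots are taken (the
 * caller then expands the group, see expand_group() below).
 */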

static void force_into_group(tdb_off_t *group,
                             unsigned bucket, tdb_off_t encoded)
{
        if (!put_into_group(group, bucket, encoded))
                abort();
}

static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
        return h->home_bucket
                | new_off
                | ((uint64_t)bits(h->h,
                                  64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
                                  TDB_OFF_UPPER_STEAL_EXTRA)
                   << TDB_OFF_HASH_EXTRA_BIT);
}
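
/*
 * Sketch of an encoded hash entry (field positions are defined by the
 * TDB_OFF_* constants in private.h; this is schematic, not to scale):
 *
 *   low bits   home bucket (record offsets are aligned, so these bits
 *              of the offset itself are known to be zero)
 *   middle     record offset (TDB_OFF_MASK)
 *   top        TDB_OFF_UPPER_STEAL_EXTRA further hash bits at
 *              TDB_OFF_HASH_EXTRA_BIT, plus the subhash flag at
 *              TDB_OFF_UPPER_STEAL_SUBHASH_BIT
 *
 * The home bucket and the stolen hash bits let match() reject most
 * mismatches without reading the record itself.
 */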

/* Simply overwrite the hash entry we found before. */
int replace_in_hash(struct tdb_context *tdb,
                    struct hash_info *h,
                    tdb_off_t new_off)
{
        return tdb_write_off(tdb, hbucket_off(h->group_start, h->found_bucket),
                             encode_offset(new_off, h));
}

/* Add into a newly created subhash. */
static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
                          unsigned hash_used, tdb_off_t val)
{
        tdb_off_t off = (val & TDB_OFF_MASK), *group;
        struct hash_info h;
        unsigned int gnum;

        h.hash_used = hash_used;

        /* FIXME chain if hash_used == 64 */
        if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
                abort();

        h.h = hash_record(tdb, off);
        gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
        h.group_start = subhash + sizeof(struct tdb_used_record)
                + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
        h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

        group = tdb_access_write(tdb, h.group_start,
                                 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
        if (!group)
                return -1;
        force_into_group(group, h.home_bucket, encode_offset(off, &h));
        return tdb_access_commit(tdb, group);
}

static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned bucket, num_vals, i;
        tdb_off_t subhash;
        tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];

        /* Attach new empty subhash under fullest bucket. */
        bucket = fullest_bucket(tdb, h->group, h->home_bucket);

        subhash = alloc(tdb, 0, sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS,
                        0, false);
        if (subhash == TDB_OFF_ERR)
                return -1;

        add_stat(tdb, alloc_subhash, 1);
        if (zero_out(tdb, subhash + sizeof(struct tdb_used_record),
                     sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS) == -1)
                return -1;

        /* Remove any which are destined for bucket or are in wrong place. */
        num_vals = 0;
        for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
                if (!h->group[i] || is_subhash(h->group[i]))
                        continue;
                if (home_bucket == bucket || home_bucket != i) {
                        vals[num_vals++] = h->group[i];
                        h->group[i] = 0;
                }
        }
        /* FIXME: This assert is valid, but we do this during unit test :( */
        /* assert(num_vals); */

        /* Overwrite expanded bucket with subhash pointer. */
        h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);

        /* Put values back. */
        for (i = 0; i < num_vals; i++) {
                unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

                if (this_bucket == bucket) {
                        if (add_to_subhash(tdb, subhash, h->hash_used, vals[i]))
                                return -1;
                } else {
                        /* There should be room to put this back. */
                        force_into_group(h->group, this_bucket, vals[i]);
                }
        }
        return 0;
}

int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
        unsigned int i, num_movers = 0;
        tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];

        h->group[h->found_bucket] = 0;
        for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
                unsigned this_bucket;

                this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
                /* Empty bucket?  We're done. */
                if (!h->group[this_bucket])
                        break;

                /* Ignore subhashes. */
                if (is_subhash(h->group[this_bucket]))
                        continue;

                /* If this one is not happy where it is, we'll move it. */
                if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
                    != this_bucket) {
                        movers[num_movers++] = h->group[this_bucket];
                        h->group[this_bucket] = 0;
                }
        }

        /* Put back the ones we erased. */
        for (i = 0; i < num_movers; i++) {
                force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
                                 movers[i]);
        }

        /* Now we write back the hash group. */
        return tdb_write_convert(tdb, h->group_start,
                                 h->group, sizeof(h->group));
}

int add_to_hash(struct tdb_context *tdb, struct hash_info *h, tdb_off_t new_off)
{
        /* FIXME: chain! */
        if (h->hash_used >= 64)
                abort();

        /* We hit an empty bucket during search?  That's where it goes. */
        if (!h->group[h->found_bucket]) {
                h->group[h->found_bucket] = encode_offset(new_off, h);
                /* Write back the modified group. */
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));
        }

        /* We're full.  Expand. */
        if (expand_group(tdb, h) == -1)
                return -1;

        if (is_subhash(h->group[h->home_bucket])) {
                /* We were expanded! */
                tdb_off_t hashtable;
                unsigned int gnum;

                /* Write back the modified group. */
                if (tdb_write_convert(tdb, h->group_start, h->group,
                                      sizeof(h->group)))
                        return -1;

                /* Move hashinfo down a level. */
                hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
                        + sizeof(struct tdb_used_record);
                gnum = use_bits(h, TDB_SUBLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
                h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
                h->group_start = hashtable
                        + gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                if (tdb_read_convert(tdb, h->group_start, &h->group,
                                     sizeof(h->group)) == -1)
                        return -1;
        }

        /* Expanding the group must have made room if it didn't choose this
         * bucket. */
        if (put_into_group(h->group, h->home_bucket, encode_offset(new_off, h)))
                return tdb_write_convert(tdb, h->group_start,
                                         h->group, sizeof(h->group));

        /* This can happen if all hashes in group (and us) dropped into same
         * group in subhash. */
        return add_to_hash(tdb, h, new_off);
}
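
/*
 * Note on the tail recursion in add_to_hash(): every time we descend,
 * use_bits() consumes another TDB_SUBLEVEL_HASH_BITS of the hash, so
 * hash_used grows strictly and the recursion must terminate, at worst
 * hitting the hash_used >= 64 abort() (the unimplemented chaining case).
 */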

/* Traverse support: returns offset of record, or 0 or TDB_OFF_ERR. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
                              struct traverse_info *tinfo)
{
        tdb_off_t off, val;
        unsigned int i;
        struct traverse_level *tlevel;

        tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
        for (i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      tlevel->entry, tlevel->total_buckets);
             i != tlevel->total_buckets;
             i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
                                      i+1, tlevel->total_buckets)) {
                val = tdb_read_off(tdb, tlevel->hashtable+sizeof(tdb_off_t)*i);
                if (unlikely(val == TDB_OFF_ERR))
                        return TDB_OFF_ERR;

                off = val & TDB_OFF_MASK;

                /* This makes the delete-all-in-traverse case work
                 * (and simplifies our logic a little). */
                if (off == tinfo->prev)
                        continue;

                tlevel->entry = i;

                if (!is_subhash(val)) {
                        /* Found one. */
                        tinfo->prev = off;
                        return off;
                }

                /* When we come back, we want the next one */
                tlevel->entry++;
                tinfo->num_levels++;
                tlevel++;
                tlevel->hashtable = off + sizeof(struct tdb_used_record);
                tlevel->entry = 0;
                tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
                goto again;
        }

        /* Nothing there? */
        if (tinfo->num_levels == 1)
                return 0;

        /* Go back up and keep searching. */
        tinfo->num_levels--;
        tlevel--;
        goto again;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int next_in_hash(struct tdb_context *tdb, int ltype,
                 struct traverse_info *tinfo,
                 TDB_DATA *kbuf, size_t *dlen)
{
        const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        tdb_off_t hlock_start, hlock_range, off;

        while (tinfo->toplevel_group < (1 << group_bits)) {
                /* Lock this group's whole slice of hash space: the same
                 * start and range that hlock_range() computes. */
                hlock_start = (tdb_off_t)tinfo->toplevel_group
                        << (64 - group_bits);
                hlock_range = 1ULL << (64 - group_bits);
                if (tdb_lock_hashes(tdb, hlock_start, hlock_range, ltype,
                                    TDB_LOCK_WAIT) != 0)
                        return -1;

                off = iterate_hash(tdb, tinfo);
                if (off) {
                        struct tdb_used_record rec;

                        if (tdb_read_convert(tdb, off, &rec, sizeof(rec))) {
                                tdb_unlock_hashes(tdb,
                                                  hlock_start, hlock_range,
                                                  ltype);
                                return -1;
                        }
                        if (rec_magic(&rec) != TDB_MAGIC) {
                                tdb_logerr(tdb, TDB_ERR_CORRUPT,
                                           TDB_DEBUG_FATAL,
                                           "next_in_hash:"
                                           " corrupt record at %llu",
                                           (long long)off);
                                /* Don't leak the lock on this error path. */
                                tdb_unlock_hashes(tdb,
                                                  hlock_start, hlock_range,
                                                  ltype);
                                return -1;
                        }

                        kbuf->dsize = rec_key_length(&rec);

                        /* They want data as well? */
                        if (dlen) {
                                *dlen = rec_data_length(&rec);
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize
                                                            + *dlen);
                        } else {
                                kbuf->dptr = tdb_alloc_read(tdb,
                                                            off + sizeof(rec),
                                                            kbuf->dsize);
                        }
                        tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);
                        return kbuf->dptr ? 1 : -1;
                }

                tdb_unlock_hashes(tdb, hlock_start, hlock_range, ltype);

                tinfo->toplevel_group++;
                tinfo->levels[0].hashtable
                        += (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
                tinfo->levels[0].entry = 0;
        }
        return 0;
}

/* Return 1 if we find something, 0 if not, -1 on error. */
int first_in_hash(struct tdb_context *tdb, int ltype,
                  struct traverse_info *tinfo,
                  TDB_DATA *kbuf, size_t *dlen)
{
        tinfo->prev = 0;
        tinfo->toplevel_group = 0;
        tinfo->num_levels = 1;
        tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
        tinfo->levels[0].entry = 0;
        tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

        return next_in_hash(tdb, ltype, tinfo, kbuf, dlen);
}
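
/*
 * Sketch of a traversal loop using the two functions above (error
 * handling kept minimal; the caller picks the lock type):
 *
 *      struct traverse_info tinfo;
 *      TDB_DATA key;
 *      int ret;
 *
 *      for (ret = first_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL);
 *           ret == 1;
 *           ret = next_in_hash(tdb, F_RDLCK, &tinfo, &key, NULL)) {
 *              ... use key.dptr/key.dsize, then free(key.dptr) ...
 *      }
 *      if (ret == -1)
 *              ... handle the error ...
 */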

/* Even if the entry isn't in this hash bucket, you'd have to lock this
 * bucket to find it. */
static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
                     int ltype, enum tdb_lock_flags waitflag,
                     const char *func)
{
        int ret;
        uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        ret = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
        tdb_trace_1rec(tdb, func, *key);
        return ret;
}

/* Lock/unlock one hash chain.  This is meant to reduce contention: it
 * makes no guarantee about how many records end up locked. */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
        return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
}

int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
        uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
        tdb_off_t lockstart, locksize;
        unsigned int group, gbits;

        gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
        group = bits(h, 64 - gbits, gbits);

        lockstart = hlock_range(group, &locksize);

        tdb_trace_1rec(tdb, "tdb_chainunlock", key);
        return tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
}
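
/*
 * Typical use (sketch): bracket a read-modify-write of one key so no
 * other tdb user can change that key between the fetch and the store:
 *
 *      tdb_chainlock(tdb, key);
 *      ... fetch, modify and store the record for key ...
 *      tdb_chainunlock(tdb, key);
 *
 * As noted above, the lock actually covers a whole top-level hash
 * range, so unrelated keys may be locked as a side effect.
 */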