/*
   Trivial Database 2: hash handling
   Copyright (C) Rusty Russell 2010

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
/* Internal tdb2 declarations (struct tdb_context, tdb_off_t, etc.). */
#include "private.h"
#include <ccan/hash/hash.h>
static uint64_t jenkins_hash(const void *key, size_t length, uint64_t seed,
			     void *unused)
{
	uint64_t ret;
	/* hash64_stable assumes lower bits are more important; they are a
	 * slightly better hash.  We use the upper bits first, so swap them. */
	ret = hash64_stable((const unsigned char *)key, length, seed);
	return (ret >> 32) | (ret << 32);
}
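/* Example of the swap above (illustrative value): if hash64_stable()
 * returned 0x1111222233334444, the value handed back is
 * 0x3333444411112222, so the (slightly better) low 32 bits are the first
 * ones consumed by use_bits() below, which peels bits off the top. */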
void tdb_hash_init(struct tdb_context *tdb)
{
	tdb->khash = jenkins_hash;
	tdb->hash_priv = NULL;
}
uint64_t tdb_hash(struct tdb_context *tdb, const void *ptr, size_t len)
{
	return tdb->khash(ptr, len, tdb->hash_seed, tdb->hash_priv);
}
uint64_t hash_record(struct tdb_context *tdb, tdb_off_t off)
{
	const struct tdb_used_record *r;
	const void *key;
	uint64_t klen, hash;

	r = tdb_access_read(tdb, off, sizeof(*r), true);
	if (!r)
		return 0;
	klen = rec_key_length(r);
	tdb_access_release(tdb, r);

	key = tdb_access_read(tdb, off + sizeof(*r), klen, false);
	if (!key)
		return 0;
	hash = tdb_hash(tdb, key, klen);
	tdb_access_release(tdb, key);
	return hash;
}
/* Get bits from a value. */
static uint32_t bits_from(uint64_t val, unsigned start, unsigned num)
{
	return (val >> start) & ((1U << num) - 1);
}
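/* Worked example (illustrative): bits_from(0xAB, 4, 4) shifts right by
 * four to leave 0xA, then masks with (1U << 4) - 1 == 0xF, returning
 * 0xA.  The 32-bit mask means at most 32 bits can be extracted per
 * call, which is plenty for group and level indices. */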
/* We take bits from the top: that way we can lock whole sections of the hash
 * by using lock ranges. */
static uint32_t use_bits(struct hash_info *h, unsigned num)
{
	h->hash_used += num;
	return bits_from(h->h, 64 - h->hash_used, num);
}
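/* Example of top-down consumption (bit counts illustrative): with
 * hash_used starting at 0, use_bits(h, 10) returns the top 10 bits of
 * h->h and leaves hash_used == 10; a following use_bits(h, 4) returns
 * the next 4 bits down.  Keys sharing a prefix of consumed bits land in
 * the same subtree, which is what makes locking by hash range work. */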
static bool key_matches(struct tdb_context *tdb,
			const struct tdb_used_record *rec,
			tdb_off_t off,
			const struct tdb_data *key)
{
	bool ret = false;
	const char *rkey;

	if (rec_key_length(rec) != key->dsize) {
		add_stat(tdb, compare_wrong_keylen, 1);
		return ret;
	}

	rkey = tdb_access_read(tdb, off + sizeof(*rec), key->dsize, false);
	if (!rkey)
		return ret;
	if (memcmp(rkey, key->dptr, key->dsize) == 0)
		ret = true;
	else
		add_stat(tdb, compare_wrong_keycmp, 1);
	tdb_access_release(tdb, rkey);
	return ret;
}
/* Does entry match? */
static bool match(struct tdb_context *tdb,
		  struct hash_info *h,
		  const struct tdb_data *key,
		  tdb_off_t val,
		  struct tdb_used_record *rec)
{
	tdb_off_t off;
	enum TDB_ERROR ecode;

	add_stat(tdb, compares, 1);
	/* Desired bucket must match. */
	if (h->home_bucket != (val & TDB_OFF_HASH_GROUP_MASK)) {
		add_stat(tdb, compare_wrong_bucket, 1);
		return false;
	}

	/* Top bits of offset == next bits of hash. */
	if (bits_from(val, TDB_OFF_HASH_EXTRA_BIT, TDB_OFF_UPPER_STEAL_EXTRA)
	    != bits_from(h->h, 64 - h->hash_used - TDB_OFF_UPPER_STEAL_EXTRA,
			 TDB_OFF_UPPER_STEAL_EXTRA)) {
		add_stat(tdb, compare_wrong_offsetbits, 1);
		return false;
	}

	off = val & TDB_OFF_MASK;
	ecode = tdb_read_convert(tdb, off, rec, sizeof(*rec));
	if (ecode != TDB_SUCCESS) {
		return false;
	}

	/* The record header stores the low 11 bits of the hash: check them. */
	if ((h->h & ((1 << 11) - 1)) != rec_hash(rec)) {
		add_stat(tdb, compare_wrong_rechash, 1);
		return false;
	}

	return key_matches(tdb, rec, off, key);
}
static tdb_off_t hbucket_off(tdb_off_t group_start, unsigned bucket)
{
	return group_start
		+ (bucket % (1 << TDB_HASH_GROUP_BITS)) * sizeof(tdb_off_t);
}
bool is_subhash(tdb_off_t val)
{
	return (val >> TDB_OFF_UPPER_STEAL_SUBHASH_BIT) & 1;
}
/* FIXME: Guess the depth, don't over-lock! */
static tdb_off_t hlock_range(tdb_off_t group, tdb_off_t *size)
{
	*size = 1ULL << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
	return group << (64 - (TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS));
}
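/* Worked example (assuming, purely for illustration,
 * TDB_TOPLEVEL_HASH_BITS == 10 and TDB_HASH_GROUP_BITS == 2): there are
 * 1 << (10 - 2) == 256 top-level groups, so each group owns a
 * 1ULL << 56 slice of the 64-bit hash space, and group g's slice starts
 * at g << 56.  Locking [start, start + size) therefore covers every
 * possible entry under that group, however deep its subhashes go. */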
static tdb_off_t COLD find_in_chain(struct tdb_context *tdb,
				    struct tdb_data key,
				    tdb_off_t chain,
				    struct hash_info *h,
				    struct tdb_used_record *rec,
				    struct traverse_info *tinfo)
{
	tdb_off_t off, next;
	enum TDB_ERROR ecode;

	/* In case nothing is free, we set these to zero. */
	h->home_bucket = h->found_bucket = 0;

	for (off = chain; off; off = next) {
		unsigned int i;

		h->group_start = off;
		ecode = tdb_read_convert(tdb, off, h->group, sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			return TDB_OFF_ERR;
		}

		for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
			tdb_off_t recoff;
			if (!h->group[i]) {
				/* Remember this empty bucket. */
				h->home_bucket = h->found_bucket = i;
				continue;
			}

			/* We can insert extra bits via add_to_hash
			 * empty bucket logic. */
			recoff = h->group[i] & TDB_OFF_MASK;
			ecode = tdb_read_convert(tdb, recoff, rec,
						 sizeof(*rec));
			if (ecode != TDB_SUCCESS) {
				return TDB_OFF_ERR;
			}

			if (key_matches(tdb, rec, recoff, &key)) {
				h->home_bucket = h->found_bucket = i;
				if (tinfo) {
					tinfo->levels[tinfo->num_levels]
						.hashtable = off;
					tinfo->levels[tinfo->num_levels]
						.total_buckets
						= 1 << TDB_HASH_GROUP_BITS;
					tinfo->levels[tinfo->num_levels].entry
						= i;
					tinfo->num_levels++;
				}
				return recoff;
			}
		}
		next = tdb_read_off(tdb, off
				    + offsetof(struct tdb_chain, next));
		if (next == TDB_OFF_ERR)
			return TDB_OFF_ERR;
		if (next)
			next += sizeof(struct tdb_used_record);
	}
	return 0;
}
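/* Note on chain layout: each struct tdb_chain holds one group of
 * 1 << TDB_HASH_GROUP_BITS offsets plus a "next" pointer, and lives
 * inside an allocated record, hence the sizeof(struct tdb_used_record)
 * skip above.  Chains are only reached once all 64 hash bits have been
 * consumed, so lookups there degrade to a linear scan. */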
/* This is the core routine which searches the hashtable for an entry.
 * On error, no locks are held and TDB_OFF_ERR is returned.
 * Otherwise, hinfo is filled in (and the optional tinfo).
 * If not found, the return value is 0.
 * If found, the return value is the offset, and *rec is the record. */
tdb_off_t find_and_lock(struct tdb_context *tdb,
			struct tdb_data key,
			int ltype,
			struct hash_info *h,
			struct tdb_used_record *rec,
			struct traverse_info *tinfo)
{
	uint32_t i, group;
	tdb_off_t hashtable;
	enum TDB_ERROR ecode;

	h->h = tdb_hash(tdb, key.dptr, key.dsize);
	h->hash_used = 0;
	group = use_bits(h, TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS);
	h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);

	h->hlock_start = hlock_range(group, &h->hlock_range);
	ecode = tdb_lock_hashes(tdb, h->hlock_start, h->hlock_range, ltype,
				TDB_LOCK_WAIT);
	if (ecode != TDB_SUCCESS) {
		return TDB_OFF_ERR;
	}

	hashtable = offsetof(struct tdb_header, hashtable);
	if (tinfo) {
		tinfo->toplevel_group = group;
		tinfo->num_levels = 1;
		tinfo->levels[0].entry = 0;
		tinfo->levels[0].hashtable = hashtable
			+ (group << TDB_HASH_GROUP_BITS) * sizeof(tdb_off_t);
		tinfo->levels[0].total_buckets = 1 << TDB_HASH_GROUP_BITS;
	}

	while (h->hash_used <= 64) {
		/* Read in the hash group. */
		h->group_start = hashtable
			+ group * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);

		ecode = tdb_read_convert(tdb, h->group_start, &h->group,
					 sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			goto fail;
		}

		/* Pointer to another hash table?  Go down... */
		if (is_subhash(h->group[h->home_bucket])) {
			hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
				+ sizeof(struct tdb_used_record);
			if (tinfo) {
				/* When we come back, use *next* bucket */
				tinfo->levels[tinfo->num_levels-1].entry
					+= h->home_bucket + 1;
			}
			group = use_bits(h, TDB_SUBLEVEL_HASH_BITS
					 - TDB_HASH_GROUP_BITS);
			h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
			if (tinfo) {
				tinfo->levels[tinfo->num_levels].hashtable
					= hashtable;
				tinfo->levels[tinfo->num_levels].total_buckets
					= 1 << TDB_SUBLEVEL_HASH_BITS;
				tinfo->levels[tinfo->num_levels].entry
					= group << TDB_HASH_GROUP_BITS;
				tinfo->num_levels++;
			}
			continue;
		}

		/* It's in this group: search (until 0 or all searched) */
		for (i = 0, h->found_bucket = h->home_bucket;
		     i < (1 << TDB_HASH_GROUP_BITS);
		     i++, h->found_bucket = ((h->found_bucket+1)
					     % (1 << TDB_HASH_GROUP_BITS))) {
			if (is_subhash(h->group[h->found_bucket]))
				continue;

			if (!h->group[h->found_bucket])
				break;

			if (match(tdb, h, &key, h->group[h->found_bucket],
				  rec)) {
				if (tinfo) {
					tinfo->levels[tinfo->num_levels-1]
						.entry += h->found_bucket;
				}
				return h->group[h->found_bucket]
					& TDB_OFF_MASK;
			}
		}
		/* Didn't find it: h indicates where it would go. */
		return 0;
	}

	return find_in_chain(tdb, key, hashtable, h, rec, tinfo);

fail:
	tdb_unlock_hashes(tdb, h->hlock_start, h->hlock_range, ltype);
	return TDB_OFF_ERR;
}
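/* Typical lookup pattern (a sketch, not a verbatim caller from this
 * tree; it only uses functions and fields defined above):
 *
 *	struct hash_info h;
 *	struct tdb_used_record rec;
 *	tdb_off_t off = find_and_lock(tdb, key, F_RDLCK, &h, &rec, NULL);
 *	if (off != TDB_OFF_ERR && off != 0) {
 *		... use off and rec while the hash range stays locked ...
 *	}
 *	if (off != TDB_OFF_ERR)
 *		tdb_unlock_hashes(tdb, h.hlock_start, h.hlock_range,
 *				  F_RDLCK);
 *
 * Note that the range stays locked even when off == 0, since h then
 * tells the caller where an insertion would go. */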
/* I wrote a simple test, expanding a hash to 2GB, for the following
 * cases:
 * 1) Expanding all the buckets at once,
 * 2) Expanding the bucket we wanted to place the new entry into.
 * 3) Expanding the most-populated bucket,
 *
 * I measured the worst/average/best density during this process.
 *
 * So we figure out the busiest bucket for the moment. */
static unsigned fullest_bucket(struct tdb_context *tdb,
			       const tdb_off_t *group,
			       unsigned new_bucket)
{
	unsigned counts[1 << TDB_HASH_GROUP_BITS] = { 0 };
	unsigned int i, best_bucket;

	/* Count the new entry. */
	counts[new_bucket]++;
	best_bucket = new_bucket;

	for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned this_bucket;

		if (is_subhash(group[i]))
			continue;
		this_bucket = group[i] & TDB_OFF_HASH_GROUP_MASK;
		if (++counts[this_bucket] > counts[best_bucket])
			best_bucket = this_bucket;
	}

	return best_bucket;
}
static bool put_into_group(tdb_off_t *group,
			   unsigned bucket, tdb_off_t encoded)
{
	unsigned int i;

	for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned b = (bucket + i) % (1 << TDB_HASH_GROUP_BITS);

		if (!group[b]) {
			group[b] = encoded;
			return true;
		}
	}
	return false;
}
/* The caller guarantees there is room: failure here is a bug. */
static void force_into_group(tdb_off_t *group,
			     unsigned bucket, tdb_off_t encoded)
{
	if (!put_into_group(group, bucket, encoded))
		abort();
}
static tdb_off_t encode_offset(tdb_off_t new_off, struct hash_info *h)
{
	return h->home_bucket
		| new_off
		| ((uint64_t)bits_from(h->h,
				       64 - h->hash_used
				       - TDB_OFF_UPPER_STEAL_EXTRA,
				       TDB_OFF_UPPER_STEAL_EXTRA)
		   << TDB_OFF_HASH_EXTRA_BIT);
}
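/* Layout of an encoded bucket entry (a sketch; the exact constants are
 * defined elsewhere in this tree):
 *
 *	low TDB_HASH_GROUP_BITS bits:	home bucket (record offsets are
 *					aligned, so these bits of new_off
 *					are known to be zero)
 *	middle bits (TDB_OFF_MASK):	the record offset itself
 *	stolen top bits:		TDB_OFF_UPPER_STEAL_EXTRA more
 *					hash bits, plus the subhash flag
 *
 * match() checks the home bucket and the extra hash bits before reading
 * the record, so most mismatches cost no extra I/O. */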
/* Simply overwrite the hash entry we found before. */
int replace_in_hash(struct tdb_context *tdb,
		    struct hash_info *h,
		    tdb_off_t new_off)
{
	enum TDB_ERROR ecode;

	ecode = tdb_write_off(tdb,
			      hbucket_off(h->group_start, h->found_bucket),
			      encode_offset(new_off, h));
	if (ecode != TDB_SUCCESS) {
		return -1;
	}
	return 0;
}
/* We slot in anywhere that's empty in the chain. */
static int COLD add_to_chain(struct tdb_context *tdb,
			     tdb_off_t subhash,
			     tdb_off_t new_off)
{
	size_t entry = tdb_find_zero_off(tdb, subhash, 1<<TDB_HASH_GROUP_BITS);
	enum TDB_ERROR ecode;

	if (entry == 1 << TDB_HASH_GROUP_BITS) {
		tdb_off_t next;

		/* This group is full: move to (or create) the next link. */
		next = tdb_read_off(tdb, subhash
				    + offsetof(struct tdb_chain, next));
		if (next == TDB_OFF_ERR)
			return -1;

		if (!next) {
			next = alloc(tdb, 0, sizeof(struct tdb_chain), 0,
				     TDB_CHAIN_MAGIC, false);
			if (next == TDB_OFF_ERR)
				return -1;
			ecode = zero_out(tdb,
					 next+sizeof(struct tdb_used_record),
					 sizeof(struct tdb_chain));
			if (ecode != TDB_SUCCESS) {
				return -1;
			}
			ecode = tdb_write_off(tdb, subhash
					      + offsetof(struct tdb_chain,
							 next),
					      next);
			if (ecode != TDB_SUCCESS) {
				return -1;
			}
		}
		/* Chain contents follow the allocation header. */
		next += sizeof(struct tdb_used_record);
		return add_to_chain(tdb, next, new_off);
	}

	/* At chain depth the hash is exhausted; store the offset as-is. */
	ecode = tdb_write_off(tdb, subhash + entry * sizeof(tdb_off_t),
			      new_off);
	if (ecode != TDB_SUCCESS) {
		return -1;
	}
	return 0;
}
/* Add into a newly created subhash. */
static int add_to_subhash(struct tdb_context *tdb, tdb_off_t subhash,
			  unsigned hash_used, tdb_off_t val)
{
	tdb_off_t off = (val & TDB_OFF_MASK), *group;
	struct hash_info h;
	unsigned int gnum;
	enum TDB_ERROR ecode;

	h.hash_used = hash_used;

	if (hash_used + TDB_SUBLEVEL_HASH_BITS > 64)
		return add_to_chain(tdb, subhash, off);

	h.h = hash_record(tdb, off);
	gnum = use_bits(&h, TDB_SUBLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS);
	h.group_start = subhash
		+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
	h.home_bucket = use_bits(&h, TDB_HASH_GROUP_BITS);

	group = tdb_access_write(tdb, h.group_start,
				 sizeof(*group) << TDB_HASH_GROUP_BITS, true);
	if (!group)
		return -1;
	force_into_group(group, h.home_bucket, encode_offset(off, &h));
	ecode = tdb_access_commit(tdb, group);
	if (ecode != TDB_SUCCESS) {
		return -1;
	}
	return 0;
}
static int expand_group(struct tdb_context *tdb, struct hash_info *h)
{
	unsigned bucket, num_vals, i, magic;
	size_t subsize;
	tdb_off_t subhash;
	tdb_off_t vals[1 << TDB_HASH_GROUP_BITS];
	enum TDB_ERROR ecode;

	/* Attach new empty subhash under fullest bucket. */
	bucket = fullest_bucket(tdb, h->group, h->home_bucket);

	if (h->hash_used == 64) {
		add_stat(tdb, alloc_chain, 1);
		subsize = sizeof(struct tdb_chain);
		magic = TDB_CHAIN_MAGIC;
	} else {
		add_stat(tdb, alloc_subhash, 1);
		subsize = (sizeof(tdb_off_t) << TDB_SUBLEVEL_HASH_BITS);
		magic = TDB_HTABLE_MAGIC;
	}

	subhash = alloc(tdb, 0, subsize, 0, magic, false);
	if (subhash == TDB_OFF_ERR)
		return -1;

	ecode = zero_out(tdb, subhash + sizeof(struct tdb_used_record),
			 subsize);
	if (ecode != TDB_SUCCESS) {
		return -1;
	}

	/* Remove any which are destined for bucket or are in wrong place. */
	num_vals = 0;
	for (i = 0; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned home_bucket = h->group[i] & TDB_OFF_HASH_GROUP_MASK;
		if (!h->group[i] || is_subhash(h->group[i]))
			continue;
		if (home_bucket == bucket || home_bucket != i) {
			vals[num_vals++] = h->group[i];
			h->group[i] = 0;
		}
	}
	/* FIXME: This assert is valid, but we do this during unit test :( */
	/* assert(num_vals); */

	/* Overwrite expanded bucket with subhash pointer. */
	h->group[bucket] = subhash | (1ULL << TDB_OFF_UPPER_STEAL_SUBHASH_BIT);

	/* Point to actual contents of record. */
	subhash += sizeof(struct tdb_used_record);

	/* Put values back. */
	for (i = 0; i < num_vals; i++) {
		unsigned this_bucket = vals[i] & TDB_OFF_HASH_GROUP_MASK;

		if (this_bucket == bucket) {
			if (add_to_subhash(tdb, subhash, h->hash_used,
					   vals[i]))
				return -1;
		} else {
			/* There should be room to put this back. */
			force_into_group(h->group, this_bucket, vals[i]);
		}
	}
	return 0;
}
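/* Example of one expansion step (illustrative): suppose the group is
 * full and fullest_bucket() picks bucket 2.  Every entry whose home
 * bucket is 2, plus any entry displaced from its home bucket, is pulled
 * out; bucket 2 then points at the new subhash, the bucket-2 entries
 * are rehashed into it using fresh hash bits, and the displaced ones
 * are put back where they now fit.  Each level consumes more of the
 * 64-bit hash, so colliding keys are eventually separated or, once the
 * hash is exhausted, chained. */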
int delete_from_hash(struct tdb_context *tdb, struct hash_info *h)
{
	unsigned int i, num_movers = 0;
	tdb_off_t movers[1 << TDB_HASH_GROUP_BITS];
	enum TDB_ERROR ecode;

	h->group[h->found_bucket] = 0;
	for (i = 1; i < (1 << TDB_HASH_GROUP_BITS); i++) {
		unsigned this_bucket;

		this_bucket = (h->found_bucket+i) % (1 << TDB_HASH_GROUP_BITS);
		/* Empty bucket?  We're done. */
		if (!h->group[this_bucket])
			break;

		/* Ignore subhashes. */
		if (is_subhash(h->group[this_bucket]))
			continue;

		/* If this one is not happy where it is, we'll move it. */
		if ((h->group[this_bucket] & TDB_OFF_HASH_GROUP_MASK)
		    != this_bucket) {
			movers[num_movers++] = h->group[this_bucket];
			h->group[this_bucket] = 0;
		}
	}

	/* Put back the ones we erased. */
	for (i = 0; i < num_movers; i++) {
		force_into_group(h->group, movers[i] & TDB_OFF_HASH_GROUP_MASK,
				 movers[i]);
	}

	/* Now we write back the hash group. */
	ecode = tdb_write_convert(tdb, h->group_start,
				  h->group, sizeof(h->group));
	if (ecode != TDB_SUCCESS) {
		return -1;
	}
	return 0;
}
int add_to_hash(struct tdb_context *tdb, struct hash_info *h,
		tdb_off_t new_off)
{
	enum TDB_ERROR ecode;

	/* We hit an empty bucket during search?  That's where it goes. */
	if (!h->group[h->found_bucket]) {
		h->group[h->found_bucket] = encode_offset(new_off, h);
		/* Write back the modified group. */
		ecode = tdb_write_convert(tdb, h->group_start,
					  h->group, sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			return -1;
		}
		return 0;
	}

	if (h->hash_used > 64)
		return add_to_chain(tdb, h->group_start, new_off);

	/* We're full.  Expand. */
	if (expand_group(tdb, h) == -1)
		return -1;

	if (is_subhash(h->group[h->home_bucket])) {
		/* We were expanded! */
		tdb_off_t hashtable;
		unsigned int gnum;

		/* Write back the modified group. */
		ecode = tdb_write_convert(tdb, h->group_start, h->group,
					  sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			return -1;
		}

		/* Move hashinfo down a level. */
		hashtable = (h->group[h->home_bucket] & TDB_OFF_MASK)
			+ sizeof(struct tdb_used_record);
		gnum = use_bits(h, TDB_SUBLEVEL_HASH_BITS
				- TDB_HASH_GROUP_BITS);
		h->home_bucket = use_bits(h, TDB_HASH_GROUP_BITS);
		h->group_start = hashtable
			+ gnum * (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
		ecode = tdb_read_convert(tdb, h->group_start, &h->group,
					 sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			return -1;
		}
	}

	/* Expanding the group must have made room if it didn't choose this
	 * bucket. */
	if (put_into_group(h->group, h->home_bucket,
			   encode_offset(new_off, h))) {
		ecode = tdb_write_convert(tdb, h->group_start,
					  h->group, sizeof(h->group));
		if (ecode != TDB_SUCCESS) {
			return -1;
		}
		return 0;
	}

	/* This can happen if all hashes in group (and us) dropped into same
	 * group in subhash. */
	return add_to_hash(tdb, h, new_off);
}
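/* Note the tail recursion above: it only happens after expand_group()
 * has replaced the home bucket with a subhash and the hash_info has
 * moved down a level, so each retry consumes more hash bits and the
 * recursion is bounded by the depth of the tree (and then by the
 * chain). */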
/* Traverse support: returns offset of record, or 0 or TDB_OFF_ERR. */
static tdb_off_t iterate_hash(struct tdb_context *tdb,
			      struct traverse_info *tinfo)
{
	tdb_off_t off, val;
	unsigned int i;
	struct traverse_level *tlevel;

	tlevel = &tinfo->levels[tinfo->num_levels-1];

again:
	for (i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
				      tlevel->entry, tlevel->total_buckets);
	     i != tlevel->total_buckets;
	     i = tdb_find_nonzero_off(tdb, tlevel->hashtable,
				      i+1, tlevel->total_buckets)) {
		val = tdb_read_off(tdb, tlevel->hashtable+sizeof(tdb_off_t)*i);
		if (unlikely(val == TDB_OFF_ERR))
			return TDB_OFF_ERR;

		off = val & TDB_OFF_MASK;

		/* This makes the delete-all-in-traverse case work
		 * (and simplifies our logic a little). */
		if (off == tinfo->prev)
			continue;

		tlevel->entry = i;

		if (!is_subhash(val)) {
			/* Found one. */
			tinfo->prev = off;
			return off;
		}

		/* When we come back, we want the next one */
		tlevel->entry++;
		tinfo->num_levels++;
		tlevel++;
		tlevel->hashtable = off + sizeof(struct tdb_used_record);
		tlevel->entry = 0;
		/* Next level is a chain? */
		if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1))
			tlevel->total_buckets = (1 << TDB_HASH_GROUP_BITS);
		else
			tlevel->total_buckets = (1 << TDB_SUBLEVEL_HASH_BITS);
		goto again;
	}

	/* Nothing there? */
	if (tinfo->num_levels == 1)
		return 0;

	/* Handle chained entries. */
	if (unlikely(tinfo->num_levels == TDB_MAX_LEVELS + 1)) {
		tlevel->hashtable = tdb_read_off(tdb, tlevel->hashtable
						 + offsetof(struct tdb_chain,
							    next));
		if (tlevel->hashtable == TDB_OFF_ERR)
			return TDB_OFF_ERR;
		if (tlevel->hashtable) {
			tlevel->hashtable += sizeof(struct tdb_used_record);
			tlevel->entry = 0;
			goto again;
		}
	}

	/* Go back up and keep searching. */
	tinfo->num_levels--;
	tlevel--;
	goto again;
}
/* Return 1 if we find something, 0 if not, -1 on error. */
int next_in_hash(struct tdb_context *tdb,
		 struct traverse_info *tinfo,
		 TDB_DATA *kbuf, size_t *dlen)
{
	const unsigned group_bits = TDB_TOPLEVEL_HASH_BITS-TDB_HASH_GROUP_BITS;
	tdb_off_t hl_start, hl_range, off;
	enum TDB_ERROR ecode;

	while (tinfo->toplevel_group < (1 << group_bits)) {
		hl_start = (tdb_off_t)tinfo->toplevel_group
			<< (64 - group_bits);
		hl_range = 1ULL << group_bits;
		ecode = tdb_lock_hashes(tdb, hl_start, hl_range, F_RDLCK,
					TDB_LOCK_WAIT);
		if (ecode != TDB_SUCCESS) {
			return -1;
		}

		off = iterate_hash(tdb, tinfo);
		if (off) {
			struct tdb_used_record rec;

			ecode = tdb_read_convert(tdb, off, &rec, sizeof(rec));
			if (ecode != TDB_SUCCESS) {
				tdb_unlock_hashes(tdb,
						  hl_start, hl_range, F_RDLCK);
				return -1;
			}
			if (rec_magic(&rec) != TDB_USED_MAGIC) {
				tdb_logerr(tdb, TDB_ERR_CORRUPT,
					   TDB_LOG_ERROR,
					   "next_in_hash:"
					   " corrupt record at %llu",
					   (long long)off);
				return -1;
			}

			kbuf->dsize = rec_key_length(&rec);

			/* They want data as well? */
			if (dlen) {
				*dlen = rec_data_length(&rec);
				kbuf->dptr = tdb_alloc_read(tdb,
							    off + sizeof(rec),
							    kbuf->dsize
							    + *dlen);
			} else {
				kbuf->dptr = tdb_alloc_read(tdb,
							    off + sizeof(rec),
							    kbuf->dsize);
			}
			tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);
			return kbuf->dptr ? 1 : -1;
		}

		tdb_unlock_hashes(tdb, hl_start, hl_range, F_RDLCK);

		tinfo->toplevel_group++;
		tinfo->levels[0].hashtable
			+= (sizeof(tdb_off_t) << TDB_HASH_GROUP_BITS);
		tinfo->levels[0].entry = 0;
	}
	return 0;
}
/* Return 1 if we find something, 0 if not, -1 on error. */
int first_in_hash(struct tdb_context *tdb,
		  struct traverse_info *tinfo,
		  TDB_DATA *kbuf, size_t *dlen)
{
	/* iterate_hash() compares against prev, so it must start at zero. */
	tinfo->prev = 0;
	tinfo->toplevel_group = 0;
	tinfo->num_levels = 1;
	tinfo->levels[0].hashtable = offsetof(struct tdb_header, hashtable);
	tinfo->levels[0].entry = 0;
	tinfo->levels[0].total_buckets = (1 << TDB_HASH_GROUP_BITS);

	return next_in_hash(tdb, tinfo, kbuf, dlen);
}
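/* Traversal sketch (an illustrative caller, not code from this file):
 *
 *	struct traverse_info tinfo;
 *	TDB_DATA k;
 *	int ret;
 *
 *	for (ret = first_in_hash(tdb, &tinfo, &k, NULL);
 *	     ret == 1;
 *	     ret = next_in_hash(tdb, &tinfo, &k, NULL)) {
 *		... examine k, then release k.dptr ...
 *	}
 *	ret == 0 means the traversal finished; -1 means an error.
 */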
/* Even if the entry isn't in this hash bucket, you'd have to lock this
 * bucket to find it. */
static int chainlock(struct tdb_context *tdb, const TDB_DATA *key,
		     int ltype, enum tdb_lock_flags waitflag,
		     const char *func)
{
	enum TDB_ERROR ecode;
	uint64_t h = tdb_hash(tdb, key->dptr, key->dsize);
	tdb_off_t lockstart, locksize;
	unsigned int group, gbits;

	gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
	group = bits_from(h, 64 - gbits, gbits);

	lockstart = hlock_range(group, &locksize);

	ecode = tdb_lock_hashes(tdb, lockstart, locksize, ltype, waitflag);
	tdb_trace_1rec(tdb, func, *key);
	if (ecode != TDB_SUCCESS) {
		return -1;
	}
	return 0;
}
/* lock/unlock one hash chain. This is meant to be used to reduce
 * contention - it cannot guarantee how many records will be locked. */
int tdb_chainlock(struct tdb_context *tdb, TDB_DATA key)
{
	return chainlock(tdb, &key, F_WRLCK, TDB_LOCK_WAIT, "tdb_chainlock");
}
int tdb_chainunlock(struct tdb_context *tdb, TDB_DATA key)
{
	uint64_t h = tdb_hash(tdb, key.dptr, key.dsize);
	tdb_off_t lockstart, locksize;
	unsigned int group, gbits;
	enum TDB_ERROR ecode;

	gbits = TDB_TOPLEVEL_HASH_BITS - TDB_HASH_GROUP_BITS;
	group = bits_from(h, 64 - gbits, gbits);

	lockstart = hlock_range(group, &locksize);

	tdb_trace_1rec(tdb, "tdb_chainunlock", key);
	ecode = tdb_unlock_hashes(tdb, lockstart, locksize, F_WRLCK);
	if (ecode != TDB_SUCCESS) {
		return -1;
	}
	return 0;
}
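/* Pairing sketch (illustrative): wrap a read-modify-write of one key so
 * no other process can change it in between.  Both calls hash the key
 * themselves, so they derive the same lock range from it:
 *
 *	tdb_chainlock(tdb, key);
 *	... fetch, compute, and store under the same key ...
 *	tdb_chainunlock(tdb, key);
 */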